applied-ai-018 committed on
Commit
1488479
·
verified ·
1 Parent(s): ec858d8

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/dots.png +3 -0
  2. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__init__.py +71 -0
  3. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/linsolve.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_add_newdocs.py +153 -0
  7. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cpython-310-x86_64-linux-gnu.so +0 -0
  8. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/linsolve.py +746 -0
  9. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__init__.py +0 -0
  10. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/test_linsolve.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py +805 -0
  12. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__init__.py +22 -0
  13. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/__init__.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/_svds.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/_svds_doc.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/_svds_doc.py +400 -0
  17. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/COPYING +45 -0
  18. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__init__.py +20 -0
  19. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/__init__.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/arpack.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-310-x86_64-linux-gnu.so +0 -0
  22. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/arpack.py +1702 -0
  23. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__init__.py +0 -0
  24. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/test_arpack.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py +718 -0
  27. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__init__.py +16 -0
  28. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/__init__.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/lobpcg.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py +1112 -0
  31. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py +0 -0
  32. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/test_lobpcg.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py +645 -0
  35. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__init__.py +0 -0
  36. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__pycache__/test_svds.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/test_svds.py +862 -0
  39. llmeval-env/lib/python3.10/site-packages/scipy/spatial/__init__.py +129 -0
  40. llmeval-env/lib/python3.10/site-packages/scipy/spatial/_ckdtree.pyi +214 -0
  41. llmeval-env/lib/python3.10/site-packages/scipy/spatial/_distance_pybind.cpython-310-x86_64-linux-gnu.so +0 -0
  42. llmeval-env/lib/python3.10/site-packages/scipy/spatial/_distance_wrap.cpython-310-x86_64-linux-gnu.so +0 -0
  43. llmeval-env/lib/python3.10/site-packages/scipy/spatial/_geometric_slerp.py +240 -0
  44. llmeval-env/lib/python3.10/site-packages/scipy/spatial/_hausdorff.cpython-310-x86_64-linux-gnu.so +0 -0
  45. llmeval-env/lib/python3.10/site-packages/scipy/spatial/_kdtree.py +920 -0
  46. llmeval-env/lib/python3.10/site-packages/scipy/spatial/_plotutils.py +270 -0
  47. llmeval-env/lib/python3.10/site-packages/scipy/spatial/_procrustes.py +132 -0
  48. llmeval-env/lib/python3.10/site-packages/scipy/spatial/_qhull.pyi +213 -0
  49. llmeval-env/lib/python3.10/site-packages/scipy/spatial/_spherical_voronoi.py +341 -0
  50. llmeval-env/lib/python3.10/site-packages/scipy/spatial/_voronoi.cpython-310-x86_64-linux-gnu.so +0 -0
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/dots.png ADDED

Git LFS Details

  • SHA256: b20b56fadc7471c0694d3e8148d9e28a83d7967bac16bf8852094afea3950414
  • Pointer size: 129 Bytes
  • Size of remote file: 2.11 kB
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__init__.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Linear Solvers
3
+ ==============
4
+
5
+ The default solver is SuperLU (included in the scipy distribution),
6
+ which can solve real or complex linear systems in both single and
7
+ double precisions. It is automatically replaced by UMFPACK, if
8
+ available. Note that UMFPACK works in double precision only, so
9
+ switch it off by::
10
+
11
+ >>> from scipy.sparse.linalg import spsolve, use_solver
12
+ >>> use_solver(useUmfpack=False)
13
+
14
+ to solve in the single precision. See also use_solver documentation.
15
+
16
+ Example session::
17
+
18
+ >>> from scipy.sparse import csc_matrix, spdiags
19
+ >>> from numpy import array
20
+ >>>
21
+ >>> print("Inverting a sparse linear system:")
22
+ >>> print("The sparse matrix (constructed from diagonals):")
23
+ >>> a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
24
+ >>> b = array([1, 2, 3, 4, 5])
25
+ >>> print("Solve: single precision complex:")
26
+ >>> use_solver( useUmfpack = False )
27
+ >>> a = a.astype('F')
28
+ >>> x = spsolve(a, b)
29
+ >>> print(x)
30
+ >>> print("Error: ", a@x-b)
31
+ >>>
32
+ >>> print("Solve: double precision complex:")
33
+ >>> use_solver( useUmfpack = True )
34
+ >>> a = a.astype('D')
35
+ >>> x = spsolve(a, b)
36
+ >>> print(x)
37
+ >>> print("Error: ", a@x-b)
38
+ >>>
39
+ >>> print("Solve: double precision:")
40
+ >>> a = a.astype('d')
41
+ >>> x = spsolve(a, b)
42
+ >>> print(x)
43
+ >>> print("Error: ", a@x-b)
44
+ >>>
45
+ >>> print("Solve: single precision:")
46
+ >>> use_solver( useUmfpack = False )
47
+ >>> a = a.astype('f')
48
+ >>> x = spsolve(a, b.astype('f'))
49
+ >>> print(x)
50
+ >>> print("Error: ", a@x-b)
51
+
52
+ """
53
+
54
+ #import umfpack
55
+ #__doc__ = '\n\n'.join( (__doc__, umfpack.__doc__) )
56
+ #del umfpack
57
+
58
+ from .linsolve import *
59
+ from ._superlu import SuperLU
60
+ from . import _add_newdocs
61
+ from . import linsolve
62
+
63
+ __all__ = [
64
+ 'MatrixRankWarning', 'SuperLU', 'factorized',
65
+ 'spilu', 'splu', 'spsolve',
66
+ 'spsolve_triangular', 'use_solver'
67
+ ]
68
+
69
+ from scipy._lib._testutils import PytestTester
70
+ test = PytestTester(__name__)
71
+ del PytestTester
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.11 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-310.pyc ADDED
Binary file (3.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/linsolve.cpython-310.pyc ADDED
Binary file (21.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_add_newdocs.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numpy.lib import add_newdoc
2
+
3
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU',
4
+ """
5
+ LU factorization of a sparse matrix.
6
+
7
+ Factorization is represented as::
8
+
9
+ Pr @ A @ Pc = L @ U
10
+
11
+ To construct these `SuperLU` objects, call the `splu` and `spilu`
12
+ functions.
13
+
14
+ Attributes
15
+ ----------
16
+ shape
17
+ nnz
18
+ perm_c
19
+ perm_r
20
+ L
21
+ U
22
+
23
+ Methods
24
+ -------
25
+ solve
26
+
27
+ Notes
28
+ -----
29
+
30
+ .. versionadded:: 0.14.0
31
+
32
+ Examples
33
+ --------
34
+ The LU decomposition can be used to solve matrix equations. Consider:
35
+
36
+ >>> import numpy as np
37
+ >>> from scipy.sparse import csc_matrix
38
+ >>> from scipy.sparse.linalg import splu
39
+ >>> A = csc_matrix([[1,2,0,4], [1,0,0,1], [1,0,2,1], [2,2,1,0.]])
40
+
41
+ This can be solved for a given right-hand side:
42
+
43
+ >>> lu = splu(A)
44
+ >>> b = np.array([1, 2, 3, 4])
45
+ >>> x = lu.solve(b)
46
+ >>> A.dot(x)
47
+ array([ 1., 2., 3., 4.])
48
+
49
+ The ``lu`` object also contains an explicit representation of the
50
+ decomposition. The permutations are represented as mappings of
51
+ indices:
52
+
53
+ >>> lu.perm_r
54
+ array([2, 1, 3, 0], dtype=int32) # may vary
55
+ >>> lu.perm_c
56
+ array([0, 1, 3, 2], dtype=int32) # may vary
57
+
58
+ The L and U factors are sparse matrices in CSC format:
59
+
60
+ >>> lu.L.toarray()
61
+ array([[ 1. , 0. , 0. , 0. ], # may vary
62
+ [ 0.5, 1. , 0. , 0. ],
63
+ [ 0.5, -1. , 1. , 0. ],
64
+ [ 0.5, 1. , 0. , 1. ]])
65
+ >>> lu.U.toarray()
66
+ array([[ 2. , 2. , 0. , 1. ], # may vary
67
+ [ 0. , -1. , 1. , -0.5],
68
+ [ 0. , 0. , 5. , -1. ],
69
+ [ 0. , 0. , 0. , 2. ]])
70
+
71
+ The permutation matrices can be constructed:
72
+
73
+ >>> Pr = csc_matrix((np.ones(4), (lu.perm_r, np.arange(4))))
74
+ >>> Pc = csc_matrix((np.ones(4), (np.arange(4), lu.perm_c)))
75
+
76
+ We can reassemble the original matrix:
77
+
78
+ >>> (Pr.T @ (lu.L @ lu.U) @ Pc.T).toarray()
79
+ array([[ 1., 2., 0., 4.],
80
+ [ 1., 0., 0., 1.],
81
+ [ 1., 0., 2., 1.],
82
+ [ 2., 2., 1., 0.]])
83
+ """)
84
+
85
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('solve',
86
+ """
87
+ solve(rhs[, trans])
88
+
89
+ Solves linear system of equations with one or several right-hand sides.
90
+
91
+ Parameters
92
+ ----------
93
+ rhs : ndarray, shape (n,) or (n, k)
94
+ Right hand side(s) of equation
95
+ trans : {'N', 'T', 'H'}, optional
96
+ Type of system to solve::
97
+
98
+ 'N': A @ x == rhs (default)
99
+ 'T': A^T @ x == rhs
100
+ 'H': A^H @ x == rhs
101
+
102
+ i.e., normal, transposed, and hermitian conjugate.
103
+
104
+ Returns
105
+ -------
106
+ x : ndarray, shape ``rhs.shape``
107
+ Solution vector(s)
108
+ """))
109
+
110
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('L',
111
+ """
112
+ Lower triangular factor with unit diagonal as a
113
+ `scipy.sparse.csc_matrix`.
114
+
115
+ .. versionadded:: 0.14.0
116
+ """))
117
+
118
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('U',
119
+ """
120
+ Upper triangular factor as a `scipy.sparse.csc_matrix`.
121
+
122
+ .. versionadded:: 0.14.0
123
+ """))
124
+
125
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('shape',
126
+ """
127
+ Shape of the original matrix as a tuple of ints.
128
+ """))
129
+
130
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('nnz',
131
+ """
132
+ Number of nonzero elements in the matrix.
133
+ """))
134
+
135
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_c',
136
+ """
137
+ Permutation Pc represented as an array of indices.
138
+
139
+ The column permutation matrix can be reconstructed via:
140
+
141
+ >>> Pc = np.zeros((n, n))
142
+ >>> Pc[np.arange(n), perm_c] = 1
143
+ """))
144
+
145
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_r',
146
+ """
147
+ Permutation Pr represented as an array of indices.
148
+
149
+ The row permutation matrix can be reconstructed via:
150
+
151
+ >>> Pr = np.zeros((n, n))
152
+ >>> Pr[perm_r, np.arange(n)] = 1
153
+ """))
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (379 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/linsolve.py ADDED
@@ -0,0 +1,746 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from warnings import warn
2
+
3
+ import numpy as np
4
+ from numpy import asarray
5
+ from scipy.sparse import (issparse,
6
+ SparseEfficiencyWarning, csc_matrix, csr_matrix)
7
+ from scipy.sparse._sputils import is_pydata_spmatrix, convert_pydata_sparse_to_scipy
8
+ from scipy.linalg import LinAlgError
9
+ import copy
10
+
11
+ from . import _superlu
12
+
13
+ noScikit = False
14
+ try:
15
+ import scikits.umfpack as umfpack
16
+ except ImportError:
17
+ noScikit = True
18
+
19
+ useUmfpack = not noScikit
20
+
21
+ __all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized',
22
+ 'MatrixRankWarning', 'spsolve_triangular']
23
+
24
+
25
+ class MatrixRankWarning(UserWarning):
26
+ pass
27
+
28
+
29
+ def use_solver(**kwargs):
30
+ """
31
+ Select default sparse direct solver to be used.
32
+
33
+ Parameters
34
+ ----------
35
+ useUmfpack : bool, optional
36
+ Use UMFPACK [1]_, [2]_, [3]_, [4]_. over SuperLU. Has effect only
37
+ if ``scikits.umfpack`` is installed. Default: True
38
+ assumeSortedIndices : bool, optional
39
+ Allow UMFPACK to skip the step of sorting indices for a CSR/CSC matrix.
40
+ Has effect only if useUmfpack is True and ``scikits.umfpack`` is
41
+ installed. Default: False
42
+
43
+ Notes
44
+ -----
45
+ The default sparse solver is UMFPACK when available
46
+ (``scikits.umfpack`` is installed). This can be changed by passing
47
+ useUmfpack = False, which then causes the always present SuperLU
48
+ based solver to be used.
49
+
50
+ UMFPACK requires a CSR/CSC matrix to have sorted column/row indices. If
51
+ sure that the matrix fulfills this, pass ``assumeSortedIndices=True``
52
+ to gain some speed.
53
+
54
+ References
55
+ ----------
56
+ .. [1] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern
57
+ multifrontal method with a column pre-ordering strategy, ACM
58
+ Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
59
+ https://dl.acm.org/doi/abs/10.1145/992200.992206
60
+
61
+ .. [2] T. A. Davis, A column pre-ordering strategy for the
62
+ unsymmetric-pattern multifrontal method, ACM Trans.
63
+ on Mathematical Software, 30(2), 2004, pp. 165--195.
64
+ https://dl.acm.org/doi/abs/10.1145/992200.992205
65
+
66
+ .. [3] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
67
+ method for unsymmetric sparse matrices, ACM Trans. on
68
+ Mathematical Software, 25(1), 1999, pp. 1--19.
69
+ https://doi.org/10.1145/305658.287640
70
+
71
+ .. [4] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
72
+ method for sparse LU factorization, SIAM J. Matrix Analysis and
73
+ Computations, 18(1), 1997, pp. 140--158.
74
+ https://doi.org/10.1137/S0895479894246905T.
75
+
76
+ Examples
77
+ --------
78
+ >>> import numpy as np
79
+ >>> from scipy.sparse.linalg import use_solver, spsolve
80
+ >>> from scipy.sparse import csc_matrix
81
+ >>> R = np.random.randn(5, 5)
82
+ >>> A = csc_matrix(R)
83
+ >>> b = np.random.randn(5)
84
+ >>> use_solver(useUmfpack=False) # enforce superLU over UMFPACK
85
+ >>> x = spsolve(A, b)
86
+ >>> np.allclose(A.dot(x), b)
87
+ True
88
+ >>> use_solver(useUmfpack=True) # reset umfPack usage to default
89
+ """
90
+ if 'useUmfpack' in kwargs:
91
+ globals()['useUmfpack'] = kwargs['useUmfpack']
92
+ if useUmfpack and 'assumeSortedIndices' in kwargs:
93
+ umfpack.configure(assumeSortedIndices=kwargs['assumeSortedIndices'])
94
+
95
+ def _get_umf_family(A):
96
+ """Get umfpack family string given the sparse matrix dtype."""
97
+ _families = {
98
+ (np.float64, np.int32): 'di',
99
+ (np.complex128, np.int32): 'zi',
100
+ (np.float64, np.int64): 'dl',
101
+ (np.complex128, np.int64): 'zl'
102
+ }
103
+
104
+ # A.dtype.name can only be "float64" or
105
+ # "complex128" in control flow
106
+ f_type = getattr(np, A.dtype.name)
107
+ # control flow may allow for more index
108
+ # types to get through here
109
+ i_type = getattr(np, A.indices.dtype.name)
110
+
111
+ try:
112
+ family = _families[(f_type, i_type)]
113
+
114
+ except KeyError as e:
115
+ msg = ('only float64 or complex128 matrices with int32 or int64 '
116
+ f'indices are supported! (got: matrix: {f_type}, indices: {i_type})')
117
+ raise ValueError(msg) from e
118
+
119
+ # See gh-8278. Considered converting only if
120
+ # A.shape[0]*A.shape[1] > np.iinfo(np.int32).max,
121
+ # but that didn't always fix the issue.
122
+ family = family[0] + "l"
123
+ A_new = copy.copy(A)
124
+ A_new.indptr = np.asarray(A.indptr, dtype=np.int64)
125
+ A_new.indices = np.asarray(A.indices, dtype=np.int64)
126
+
127
+ return family, A_new
128
+
129
+ def _safe_downcast_indices(A):
130
+ # check for safe downcasting
131
+ max_value = np.iinfo(np.intc).max
132
+
133
+ if A.indptr[-1] > max_value: # indptr[-1] is max b/c indptr always sorted
134
+ raise ValueError("indptr values too large for SuperLU")
135
+
136
+ if max(*A.shape) > max_value: # only check large enough arrays
137
+ if np.any(A.indices > max_value):
138
+ raise ValueError("indices values too large for SuperLU")
139
+
140
+ indices = A.indices.astype(np.intc, copy=False)
141
+ indptr = A.indptr.astype(np.intc, copy=False)
142
+ return indices, indptr
143
+
144
+ def spsolve(A, b, permc_spec=None, use_umfpack=True):
145
+ """Solve the sparse linear system Ax=b, where b may be a vector or a matrix.
146
+
147
+ Parameters
148
+ ----------
149
+ A : ndarray or sparse matrix
150
+ The square matrix A will be converted into CSC or CSR form
151
+ b : ndarray or sparse matrix
152
+ The matrix or vector representing the right hand side of the equation.
153
+ If a vector, b.shape must be (n,) or (n, 1).
154
+ permc_spec : str, optional
155
+ How to permute the columns of the matrix for sparsity preservation.
156
+ (default: 'COLAMD')
157
+
158
+ - ``NATURAL``: natural ordering.
159
+ - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
160
+ - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
161
+ - ``COLAMD``: approximate minimum degree column ordering [1]_, [2]_.
162
+
163
+ use_umfpack : bool, optional
164
+ if True (default) then use UMFPACK for the solution [3]_, [4]_, [5]_,
165
+ [6]_ . This is only referenced if b is a vector and
166
+ ``scikits.umfpack`` is installed.
167
+
168
+ Returns
169
+ -------
170
+ x : ndarray or sparse matrix
171
+ the solution of the sparse linear equation.
172
+ If b is a vector, then x is a vector of size A.shape[1]
173
+ If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1])
174
+
175
+ Notes
176
+ -----
177
+ For solving the matrix expression AX = B, this solver assumes the resulting
178
+ matrix X is sparse, as is often the case for very sparse inputs. If the
179
+ resulting X is dense, the construction of this sparse result will be
180
+ relatively expensive. In that case, consider converting A to a dense
181
+ matrix and using scipy.linalg.solve or its variants.
182
+
183
+ References
184
+ ----------
185
+ .. [1] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, Algorithm 836:
186
+ COLAMD, an approximate column minimum degree ordering algorithm,
187
+ ACM Trans. on Mathematical Software, 30(3), 2004, pp. 377--380.
188
+ :doi:`10.1145/1024074.1024080`
189
+
190
+ .. [2] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, A column approximate
191
+ minimum degree ordering algorithm, ACM Trans. on Mathematical
192
+ Software, 30(3), 2004, pp. 353--376. :doi:`10.1145/1024074.1024079`
193
+
194
+ .. [3] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern
195
+ multifrontal method with a column pre-ordering strategy, ACM
196
+ Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
197
+ https://dl.acm.org/doi/abs/10.1145/992200.992206
198
+
199
+ .. [4] T. A. Davis, A column pre-ordering strategy for the
200
+ unsymmetric-pattern multifrontal method, ACM Trans.
201
+ on Mathematical Software, 30(2), 2004, pp. 165--195.
202
+ https://dl.acm.org/doi/abs/10.1145/992200.992205
203
+
204
+ .. [5] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
205
+ method for unsymmetric sparse matrices, ACM Trans. on
206
+ Mathematical Software, 25(1), 1999, pp. 1--19.
207
+ https://doi.org/10.1145/305658.287640
208
+
209
+ .. [6] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
210
+ method for sparse LU factorization, SIAM J. Matrix Analysis and
211
+ Computations, 18(1), 1997, pp. 140--158.
212
+ https://doi.org/10.1137/S0895479894246905T.
213
+
214
+
215
+ Examples
216
+ --------
217
+ >>> import numpy as np
218
+ >>> from scipy.sparse import csc_matrix
219
+ >>> from scipy.sparse.linalg import spsolve
220
+ >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
221
+ >>> B = csc_matrix([[2, 0], [-1, 0], [2, 0]], dtype=float)
222
+ >>> x = spsolve(A, B)
223
+ >>> np.allclose(A.dot(x).toarray(), B.toarray())
224
+ True
225
+ """
226
+ is_pydata_sparse = is_pydata_spmatrix(b)
227
+ pydata_sparse_cls = b.__class__ if is_pydata_sparse else None
228
+ A = convert_pydata_sparse_to_scipy(A)
229
+ b = convert_pydata_sparse_to_scipy(b)
230
+
231
+ if not (issparse(A) and A.format in ("csc", "csr")):
232
+ A = csc_matrix(A)
233
+ warn('spsolve requires A be CSC or CSR matrix format',
234
+ SparseEfficiencyWarning, stacklevel=2)
235
+
236
+ # b is a vector only if b have shape (n,) or (n, 1)
237
+ b_is_sparse = issparse(b)
238
+ if not b_is_sparse:
239
+ b = asarray(b)
240
+ b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1))
241
+
242
+ # sum duplicates for non-canonical format
243
+ A.sum_duplicates()
244
+ A = A._asfptype() # upcast to a floating point format
245
+ result_dtype = np.promote_types(A.dtype, b.dtype)
246
+ if A.dtype != result_dtype:
247
+ A = A.astype(result_dtype)
248
+ if b.dtype != result_dtype:
249
+ b = b.astype(result_dtype)
250
+
251
+ # validate input shapes
252
+ M, N = A.shape
253
+ if (M != N):
254
+ raise ValueError(f"matrix must be square (has shape {(M, N)})")
255
+
256
+ if M != b.shape[0]:
257
+ raise ValueError(f"matrix - rhs dimension mismatch ({A.shape} - {b.shape[0]})")
258
+
259
+ use_umfpack = use_umfpack and useUmfpack
260
+
261
+ if b_is_vector and use_umfpack:
262
+ if b_is_sparse:
263
+ b_vec = b.toarray()
264
+ else:
265
+ b_vec = b
266
+ b_vec = asarray(b_vec, dtype=A.dtype).ravel()
267
+
268
+ if noScikit:
269
+ raise RuntimeError('Scikits.umfpack not installed.')
270
+
271
+ if A.dtype.char not in 'dD':
272
+ raise ValueError("convert matrix data to double, please, using"
273
+ " .astype(), or set linsolve.useUmfpack = False")
274
+
275
+ umf_family, A = _get_umf_family(A)
276
+ umf = umfpack.UmfpackContext(umf_family)
277
+ x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec,
278
+ autoTranspose=True)
279
+ else:
280
+ if b_is_vector and b_is_sparse:
281
+ b = b.toarray()
282
+ b_is_sparse = False
283
+
284
+ if not b_is_sparse:
285
+ if A.format == "csc":
286
+ flag = 1 # CSC format
287
+ else:
288
+ flag = 0 # CSR format
289
+
290
+ indices = A.indices.astype(np.intc, copy=False)
291
+ indptr = A.indptr.astype(np.intc, copy=False)
292
+ options = dict(ColPerm=permc_spec)
293
+ x, info = _superlu.gssv(N, A.nnz, A.data, indices, indptr,
294
+ b, flag, options=options)
295
+ if info != 0:
296
+ warn("Matrix is exactly singular", MatrixRankWarning, stacklevel=2)
297
+ x.fill(np.nan)
298
+ if b_is_vector:
299
+ x = x.ravel()
300
+ else:
301
+ # b is sparse
302
+ Afactsolve = factorized(A)
303
+
304
+ if not (b.format == "csc" or is_pydata_spmatrix(b)):
305
+ warn('spsolve is more efficient when sparse b '
306
+ 'is in the CSC matrix format',
307
+ SparseEfficiencyWarning, stacklevel=2)
308
+ b = csc_matrix(b)
309
+
310
+ # Create a sparse output matrix by repeatedly applying
311
+ # the sparse factorization to solve columns of b.
312
+ data_segs = []
313
+ row_segs = []
314
+ col_segs = []
315
+ for j in range(b.shape[1]):
316
+ # TODO: replace this with
317
+ # bj = b[:, j].toarray().ravel()
318
+ # once 1D sparse arrays are supported.
319
+ # That is a slightly faster code path.
320
+ bj = b[:, [j]].toarray().ravel()
321
+ xj = Afactsolve(bj)
322
+ w = np.flatnonzero(xj)
323
+ segment_length = w.shape[0]
324
+ row_segs.append(w)
325
+ col_segs.append(np.full(segment_length, j, dtype=int))
326
+ data_segs.append(np.asarray(xj[w], dtype=A.dtype))
327
+ sparse_data = np.concatenate(data_segs)
328
+ sparse_row = np.concatenate(row_segs)
329
+ sparse_col = np.concatenate(col_segs)
330
+ x = A.__class__((sparse_data, (sparse_row, sparse_col)),
331
+ shape=b.shape, dtype=A.dtype)
332
+
333
+ if is_pydata_sparse:
334
+ x = pydata_sparse_cls.from_scipy_sparse(x)
335
+
336
+ return x
337
+
338
+
339
+ def splu(A, permc_spec=None, diag_pivot_thresh=None,
340
+ relax=None, panel_size=None, options=dict()):
341
+ """
342
+ Compute the LU decomposition of a sparse, square matrix.
343
+
344
+ Parameters
345
+ ----------
346
+ A : sparse matrix
347
+ Sparse matrix to factorize. Most efficient when provided in CSC
348
+ format. Other formats will be converted to CSC before factorization.
349
+ permc_spec : str, optional
350
+ How to permute the columns of the matrix for sparsity preservation.
351
+ (default: 'COLAMD')
352
+
353
+ - ``NATURAL``: natural ordering.
354
+ - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
355
+ - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
356
+ - ``COLAMD``: approximate minimum degree column ordering
357
+
358
+ diag_pivot_thresh : float, optional
359
+ Threshold used for a diagonal entry to be an acceptable pivot.
360
+ See SuperLU user's guide for details [1]_
361
+ relax : int, optional
362
+ Expert option for customizing the degree of relaxing supernodes.
363
+ See SuperLU user's guide for details [1]_
364
+ panel_size : int, optional
365
+ Expert option for customizing the panel size.
366
+ See SuperLU user's guide for details [1]_
367
+ options : dict, optional
368
+ Dictionary containing additional expert options to SuperLU.
369
+ See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument)
370
+ for more details. For example, you can specify
371
+ ``options=dict(Equil=False, IterRefine='SINGLE'))``
372
+ to turn equilibration off and perform a single iterative refinement.
373
+
374
+ Returns
375
+ -------
376
+ invA : scipy.sparse.linalg.SuperLU
377
+ Object, which has a ``solve`` method.
378
+
379
+ See also
380
+ --------
381
+ spilu : incomplete LU decomposition
382
+
383
+ Notes
384
+ -----
385
+ This function uses the SuperLU library.
386
+
387
+ References
388
+ ----------
389
+ .. [1] SuperLU https://portal.nersc.gov/project/sparse/superlu/
390
+
391
+ Examples
392
+ --------
393
+ >>> import numpy as np
394
+ >>> from scipy.sparse import csc_matrix
395
+ >>> from scipy.sparse.linalg import splu
396
+ >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
397
+ >>> B = splu(A)
398
+ >>> x = np.array([1., 2., 3.], dtype=float)
399
+ >>> B.solve(x)
400
+ array([ 1. , -3. , -1.5])
401
+ >>> A.dot(B.solve(x))
402
+ array([ 1., 2., 3.])
403
+ >>> B.solve(A.dot(x))
404
+ array([ 1., 2., 3.])
405
+ """
406
+
407
+ if is_pydata_spmatrix(A):
408
+ def csc_construct_func(*a, cls=type(A)):
409
+ return cls.from_scipy_sparse(csc_matrix(*a))
410
+ A = A.to_scipy_sparse().tocsc()
411
+ else:
412
+ csc_construct_func = csc_matrix
413
+
414
+ if not (issparse(A) and A.format == "csc"):
415
+ A = csc_matrix(A)
416
+ warn('splu converted its input to CSC format',
417
+ SparseEfficiencyWarning, stacklevel=2)
418
+
419
+ # sum duplicates for non-canonical format
420
+ A.sum_duplicates()
421
+ A = A._asfptype() # upcast to a floating point format
422
+
423
+ M, N = A.shape
424
+ if (M != N):
425
+ raise ValueError("can only factor square matrices") # is this true?
426
+
427
+ indices, indptr = _safe_downcast_indices(A)
428
+
429
+ _options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
430
+ PanelSize=panel_size, Relax=relax)
431
+ if options is not None:
432
+ _options.update(options)
433
+
434
+ # Ensure that no column permutations are applied
435
+ if (_options["ColPerm"] == "NATURAL"):
436
+ _options["SymmetricMode"] = True
437
+
438
+ return _superlu.gstrf(N, A.nnz, A.data, indices, indptr,
439
+ csc_construct_func=csc_construct_func,
440
+ ilu=False, options=_options)
441
+
442
+
443
+ def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
444
+ diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
445
+ """
446
+ Compute an incomplete LU decomposition for a sparse, square matrix.
447
+
448
+ The resulting object is an approximation to the inverse of `A`.
449
+
450
+ Parameters
451
+ ----------
452
+ A : (N, N) array_like
453
+ Sparse matrix to factorize. Most efficient when provided in CSC format.
454
+ Other formats will be converted to CSC before factorization.
455
+ drop_tol : float, optional
456
+ Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition.
457
+ (default: 1e-4)
458
+ fill_factor : float, optional
459
+ Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10)
460
+ drop_rule : str, optional
461
+ Comma-separated string of drop rules to use.
462
+ Available rules: ``basic``, ``prows``, ``column``, ``area``,
463
+ ``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``)
464
+
465
+ See SuperLU documentation for details.
466
+
467
+ Remaining other options
468
+ Same as for `splu`
469
+
470
+ Returns
471
+ -------
472
+ invA_approx : scipy.sparse.linalg.SuperLU
473
+ Object, which has a ``solve`` method.
474
+
475
+ See also
476
+ --------
477
+ splu : complete LU decomposition
478
+
479
+ Notes
480
+ -----
481
+ To improve the better approximation to the inverse, you may need to
482
+ increase `fill_factor` AND decrease `drop_tol`.
483
+
484
+ This function uses the SuperLU library.
485
+
486
+ Examples
487
+ --------
488
+ >>> import numpy as np
489
+ >>> from scipy.sparse import csc_matrix
490
+ >>> from scipy.sparse.linalg import spilu
491
+ >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
492
+ >>> B = spilu(A)
493
+ >>> x = np.array([1., 2., 3.], dtype=float)
494
+ >>> B.solve(x)
495
+ array([ 1. , -3. , -1.5])
496
+ >>> A.dot(B.solve(x))
497
+ array([ 1., 2., 3.])
498
+ >>> B.solve(A.dot(x))
499
+ array([ 1., 2., 3.])
500
+ """
501
+
502
+ if is_pydata_spmatrix(A):
503
+ def csc_construct_func(*a, cls=type(A)):
504
+ return cls.from_scipy_sparse(csc_matrix(*a))
505
+ A = A.to_scipy_sparse().tocsc()
506
+ else:
507
+ csc_construct_func = csc_matrix
508
+
509
+ if not (issparse(A) and A.format == "csc"):
510
+ A = csc_matrix(A)
511
+ warn('spilu converted its input to CSC format',
512
+ SparseEfficiencyWarning, stacklevel=2)
513
+
514
+ # sum duplicates for non-canonical format
515
+ A.sum_duplicates()
516
+ A = A._asfptype() # upcast to a floating point format
517
+
518
+ M, N = A.shape
519
+ if (M != N):
520
+ raise ValueError("can only factor square matrices") # is this true?
521
+
522
+ indices, indptr = _safe_downcast_indices(A)
523
+
524
+ _options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol,
525
+ ILU_FillFactor=fill_factor,
526
+ DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
527
+ PanelSize=panel_size, Relax=relax)
528
+ if options is not None:
529
+ _options.update(options)
530
+
531
+ # Ensure that no column permutations are applied
532
+ if (_options["ColPerm"] == "NATURAL"):
533
+ _options["SymmetricMode"] = True
534
+
535
+ return _superlu.gstrf(N, A.nnz, A.data, indices, indptr,
536
+ csc_construct_func=csc_construct_func,
537
+ ilu=True, options=_options)
538
+
539
+
540
def factorized(A):
    """
    Return a function for solving a sparse linear system, with A pre-factorized.

    Parameters
    ----------
    A : (N, N) array_like
        Input. A in CSC format is most efficient. A CSR format matrix will
        be converted to CSC before factorization.

    Returns
    -------
    solve : callable
        To solve the linear system of equations given in `A`, the `solve`
        callable should be passed an ndarray of shape (N,).

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import factorized
    >>> from scipy.sparse import csc_matrix
    >>> A = np.array([[ 3. ,  2. , -1. ],
    ...               [ 2. , -2. ,  4. ],
    ...               [-1. ,  0.5, -1. ]])
    >>> solve = factorized(csc_matrix(A))  # Makes LU decomposition.
    >>> rhs1 = np.array([1, -2, 0])
    >>> solve(rhs1)  # Uses the LU factors.
    array([ 1., -2., -2.])

    """
    if is_pydata_spmatrix(A):
        A = A.to_scipy_sparse().tocsc()

    # Without UMFPACK the whole job is delegated to SuperLU's splu.
    if not useUmfpack:
        return splu(A).solve

    if noScikit:
        raise RuntimeError('Scikits.umfpack not installed.')

    if not (issparse(A) and A.format == "csc"):
        A = csc_matrix(A)
        warn('splu converted its input to CSC format',
             SparseEfficiencyWarning, stacklevel=2)

    A = A._asfptype()  # upcast to a floating point format

    # UMFPACK only handles double-precision (real or complex) data.
    if A.dtype.char not in 'dD':
        raise ValueError("convert matrix data to double, please, using"
                         " .astype(), or set linsolve.useUmfpack = False")

    umf_family, A = _get_umf_family(A)
    umf = umfpack.UmfpackContext(umf_family)

    # Make LU decomposition.
    umf.numeric(A)

    def solve(b):
        with np.errstate(divide="ignore", invalid="ignore"):
            # Ignoring warnings with numpy >= 1.23.0, see gh-16523
            result = umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True)
        return result

    return solve
604
+
605
+
606
def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False,
                       unit_diagonal=False):
    """
    Solve the equation ``A x = b`` for `x`, assuming A is a triangular matrix.

    Parameters
    ----------
    A : (M, M) sparse matrix
        A sparse square triangular matrix. Should be in CSR format.
    b : (M,) or (M, N) array_like
        Right-hand side matrix in ``A x = b``
    lower : bool, optional
        Whether `A` is a lower or upper triangular matrix.
        Default is lower triangular matrix.
    overwrite_A : bool, optional
        Allow changing `A`. The indices of `A` are going to be sorted and zero
        entries are going to be removed.
        Enabling gives a performance gain. Default is False.
    overwrite_b : bool, optional
        Allow overwriting data in `b`.
        Enabling gives a performance gain. Default is False.
        If `overwrite_b` is True, it should be ensured that
        `b` has an appropriate dtype to be able to store the result.
    unit_diagonal : bool, optional
        If True, diagonal elements of `a` are assumed to be 1 and will not be
        referenced.

        .. versionadded:: 1.4.0

    Returns
    -------
    x : (M,) or (M, N) ndarray
        Solution to the system ``A x = b``. Shape of return matches shape
        of `b`.

    Raises
    ------
    LinAlgError
        If `A` is singular or not triangular.
    ValueError
        If shape of `A` or shape of `b` do not match the requirements.

    Notes
    -----
    .. versionadded:: 0.19.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csr_matrix
    >>> from scipy.sparse.linalg import spsolve_triangular
    >>> A = csr_matrix([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
    >>> B = np.array([[2, 0], [-1, 0], [2, 0]], dtype=float)
    >>> x = spsolve_triangular(A, B)
    >>> np.allclose(A.dot(x), B)
    True
    """
    if is_pydata_spmatrix(A):
        A = A.to_scipy_sparse().tocsr()

    # Check the input for correct type and format; copy unless the caller
    # explicitly allows in-place canonicalization of A.
    if not (issparse(A) and A.format == "csr"):
        warn('CSR matrix format is required. Converting to CSR matrix.',
             SparseEfficiencyWarning, stacklevel=2)
        A = csr_matrix(A)
    elif not overwrite_A:
        A = A.copy()

    if A.shape[0] != A.shape[1]:
        raise ValueError(
            f'A must be a square matrix but its shape is {A.shape}.')

    # sum duplicates for non-canonical format
    A.sum_duplicates()

    b = np.asanyarray(b)

    if b.ndim not in [1, 2]:
        raise ValueError(
            f'b must have 1 or 2 dims but its shape is {b.shape}.')
    if A.shape[0] != b.shape[0]:
        raise ValueError(
            'The size of the dimensions of A must be equal to '
            'the size of the first dimension of b but the shape of A is '
            f'{A.shape} and the shape of b is {b.shape}.'
        )

    # The solution is accumulated into (a copy of) b, upcast so that the
    # result of the division always fits.
    x_dtype = np.result_type(A.data, b, np.float64)
    if overwrite_b:
        if np.can_cast(b.dtype, x_dtype, casting='same_kind'):
            x = b
        else:
            raise ValueError(
                f'Cannot overwrite b (dtype {b.dtype}) with result '
                f'of type {x_dtype}.'
            )
    else:
        x = b.astype(x_dtype, copy=True)

    # Forward substitution walks rows top-down; backward bottom-up.
    rows = range(len(b)) if lower else range(len(b) - 1, -1, -1)

    for i in rows:
        # Extent of row i in the CSR data/indices arrays.
        start = A.indptr[i]
        stop = A.indptr[i + 1]

        # With sorted indices, a triangular row keeps its diagonal entry at
        # one end of the row; everything else is off-diagonal.
        if lower:
            diag_idx = stop - 1
            off_diag = slice(start, stop - 1)
        else:
            diag_idx = start
            off_diag = slice(start + 1, stop)

        # Check regularity and triangularity of A.
        if not unit_diagonal and (stop <= start
                                  or A.indices[diag_idx] < i):
            raise LinAlgError(
                f'A is singular: diagonal {i} is zero.')
        if not unit_diagonal and A.indices[diag_idx] > i:
            raise LinAlgError(
                'A is not triangular: A[{}, {}] is nonzero.'
                ''.format(i, A.indices[diag_idx]))

        # Subtract the contribution of the already-solved unknowns.
        cols = A.indices[off_diag]
        vals = A.data[off_diag]
        x[i] -= np.dot(x[cols].T, vals)

        # Divide by the diagonal entry (skipped for unit-diagonal systems).
        if not unit_diagonal:
            x[i] /= A.data[diag_idx]

    return x
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/test_linsolve.cpython-310.pyc ADDED
Binary file (26 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py ADDED
@@ -0,0 +1,805 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import threading
3
+
4
+ import numpy as np
5
+ from numpy import array, finfo, arange, eye, all, unique, ones, dot
6
+ import numpy.random as random
7
+ from numpy.testing import (
8
+ assert_array_almost_equal, assert_almost_equal,
9
+ assert_equal, assert_array_equal, assert_, assert_allclose,
10
+ assert_warns, suppress_warnings)
11
+ import pytest
12
+ from pytest import raises as assert_raises
13
+
14
+ import scipy.linalg
15
+ from scipy.linalg import norm, inv
16
+ from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix,
17
+ csr_matrix, identity, issparse, dok_matrix, lil_matrix, bsr_matrix)
18
+ from scipy.sparse.linalg import SuperLU
19
+ from scipy.sparse.linalg._dsolve import (spsolve, use_solver, splu, spilu,
20
+ MatrixRankWarning, _superlu, spsolve_triangular, factorized)
21
+ import scipy.sparse
22
+
23
+ from scipy._lib._testutils import check_free_memory
24
+ from scipy._lib._util import ComplexWarning
25
+
26
+
27
# Decorator/context that silences SparseEfficiencyWarning inside tests.
sup_sparse_efficiency = suppress_warnings()
sup_sparse_efficiency.filter(SparseEfficiencyWarning)

# scikits.umfpack is not a SciPy dependency but it is optionally used in
# dsolve, so check whether it's available
try:
    import scikits.umfpack as umfpack
except ImportError:
    has_umfpack = False
else:
    has_umfpack = True
37
+
38
def toarray(a):
    """Densify *a*: sparse inputs become ndarrays, anything else passes through."""
    return a.toarray() if issparse(a) else a
43
+
44
+
45
def setup_bug_8278():
    """Build the 3-D finite-difference Laplacian system from gh-8278."""
    N = 2 ** 6
    h = 1/N
    # 1-D second-difference operator on the interior grid points.
    lap1d = scipy.sparse.diags([-1, 2, -1], [-1, 0, 1],
                               shape=(N-1, N-1))/(h**2)
    eyeN = scipy.sparse.eye(N - 1)
    # 3-D operator as a sum of Kronecker products over the three axes.
    A = (scipy.sparse.kron(eyeN, scipy.sparse.kron(eyeN, lap1d))
         + scipy.sparse.kron(eyeN, scipy.sparse.kron(lap1d, eyeN))
         + scipy.sparse.kron(lap1d, scipy.sparse.kron(eyeN, eyeN)))
    b = np.random.rand((N-1)**3)
    return A, b
56
+
57
+
58
class TestFactorized:
    """Tests for ``factorized`` with and without the optional UMFPACK backend."""

    def setup_method(self):
        n = 5
        d = arange(n) + 1
        self.n = n
        self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc()
        random.seed(1234)

    def _check_singular(self):
        A = csc_matrix((5, 5), dtype='d')
        b = ones(5)
        assert_array_almost_equal(0. * b, factorized(A)(b))

    def _check_non_singular(self):
        # Make a diagonal dominant, to make sure it is not singular
        n = 5
        mat = csc_matrix(random.rand(n, n))
        rhs = ones(n)

        expected = splu(mat).solve(rhs)
        assert_array_almost_equal(factorized(mat)(rhs), expected)

    def test_singular_without_umfpack(self):
        use_solver(useUmfpack=False)
        with assert_raises(RuntimeError, match="Factor is exactly singular"):
            self._check_singular()

    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_singular_with_umfpack(self):
        use_solver(useUmfpack=True)
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars")
            assert_warns(umfpack.UmfpackWarning, self._check_singular)

    def test_non_singular_without_umfpack(self):
        use_solver(useUmfpack=False)
        self._check_non_singular()

    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_non_singular_with_umfpack(self):
        use_solver(useUmfpack=True)
        self._check_non_singular()

    def test_cannot_factorize_nonsquare_matrix_without_umfpack(self):
        use_solver(useUmfpack=False)
        msg = "can only factor square matrices"
        with assert_raises(ValueError, match=msg):
            factorized(self.A[:, :4])

    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_factorizes_nonsquare_matrix_with_umfpack(self):
        use_solver(useUmfpack=True)
        # does not raise
        factorized(self.A[:, :4])

    def test_call_with_incorrectly_sized_matrix_without_umfpack(self):
        use_solver(useUmfpack=False)
        solve = factorized(self.A)
        b = random.rand(4)
        B = random.rand(4, 3)
        BB = random.rand(self.n, 3, 9)

        with assert_raises(ValueError, match="is of incompatible size"):
            solve(b)
        with assert_raises(ValueError, match="is of incompatible size"):
            solve(B)
        with assert_raises(ValueError,
                           match="object too deep for desired array"):
            solve(BB)

    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_call_with_incorrectly_sized_matrix_with_umfpack(self):
        use_solver(useUmfpack=True)
        solve = factorized(self.A)
        b = random.rand(4)
        B = random.rand(4, 3)
        BB = random.rand(self.n, 3, 9)

        # does not raise
        solve(b)
        msg = "object too deep for desired array"
        with assert_raises(ValueError, match=msg):
            solve(B)
        with assert_raises(ValueError, match=msg):
            solve(BB)

    def test_call_with_cast_to_complex_without_umfpack(self):
        use_solver(useUmfpack=False)
        solve = factorized(self.A)
        b = random.rand(4)
        for t in [np.complex64, np.complex128]:
            with assert_raises(TypeError, match="Cannot cast array data"):
                solve(b.astype(t))

    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_call_with_cast_to_complex_with_umfpack(self):
        use_solver(useUmfpack=True)
        solve = factorized(self.A)
        b = random.rand(4)
        for t in [np.complex64, np.complex128]:
            assert_warns(ComplexWarning, solve, b.astype(t))

    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_assume_sorted_indices_flag(self):
        # a sparse matrix with unsorted indices
        unsorted_inds = np.array([2, 0, 1, 0])
        data = np.array([10, 16, 5, 0.4])
        indptr = np.array([0, 1, 2, 4])
        A = csc_matrix((data, unsorted_inds, indptr), (3, 3))
        b = ones(3)

        # should raise when incorrectly assuming indices are sorted
        use_solver(useUmfpack=True, assumeSortedIndices=True)
        with assert_raises(RuntimeError,
                           match="UMFPACK_ERROR_invalid_matrix"):
            factorized(A)

        # should sort indices and succeed when not assuming indices are sorted
        use_solver(useUmfpack=True, assumeSortedIndices=False)
        expected = splu(A.copy()).solve(b)

        assert_equal(A.has_sorted_indices, 0)
        assert_array_almost_equal(factorized(A)(b), expected)

    @pytest.mark.slow
    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_bug_8278(self):
        check_free_memory(8000)
        use_solver(useUmfpack=True)
        A, b = setup_bug_8278()
        A = A.tocsc()
        f = factorized(A)
        x = f(b)
        assert_array_almost_equal(A @ x, b)
192
+
193
+
194
class TestLinsolve:
    """Tests for ``spsolve`` (SuperLU backend unless a test enables UMFPACK)."""

    def setup_method(self):
        use_solver(useUmfpack=False)

    def test_singular(self):
        A = csc_matrix((5, 5), dtype='d')
        b = array([1, 2, 3, 4, 5], dtype='d')
        with suppress_warnings() as sup:
            sup.filter(MatrixRankWarning, "Matrix is exactly singular")
            x = spsolve(A, b)
        assert_(not np.isfinite(x).any())

    def test_singular_gh_3312(self):
        # "Bad" test case that leads SuperLU to call LAPACK with invalid
        # arguments. Check that it fails moderately gracefully.
        ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32)
        v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296])
        A = csc_matrix((v, ij.T), shape=(20, 20))
        b = np.arange(20)

        try:
            # should either raise a runtime error or return value
            # appropriate for singular input (which yields the warning)
            with suppress_warnings() as sup:
                sup.filter(MatrixRankWarning, "Matrix is exactly singular")
                x = spsolve(A, b)
            assert not np.isfinite(x).any()
        except RuntimeError:
            pass

    @pytest.mark.parametrize('format', ['csc', 'csr'])
    @pytest.mark.parametrize('idx_dtype', [np.int32, np.int64])
    def test_twodiags(self, format: str, idx_dtype: np.dtype):
        A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5,
                    format=format)
        b = array([1, 2, 3, 4, 5])

        # condition number of A
        cond_A = norm(A.toarray(), 2) * norm(inv(A.toarray()), 2)

        for t in ['f', 'd', 'F', 'D']:
            eps = finfo(t).eps  # floating point epsilon
            b = b.astype(t)
            Asp = A.astype(t)
            Asp.indices = Asp.indices.astype(idx_dtype, copy=False)
            Asp.indptr = Asp.indptr.astype(idx_dtype, copy=False)

            x = spsolve(Asp, b)
            assert_(norm(b - Asp@x) < 10 * cond_A * eps)

    def test_bvector_smoketest(self):
        Adense = array([[0., 1., 1.],
                        [1., 0., 1.],
                        [0., 0., 1.]])
        As = csc_matrix(Adense)
        random.seed(1234)
        x = random.randn(3)
        b = As@x
        x2 = spsolve(As, b)

        assert_array_almost_equal(x, x2)

    def test_bmatrix_smoketest(self):
        Adense = array([[0., 1., 1.],
                        [1., 0., 1.],
                        [0., 0., 1.]])
        As = csc_matrix(Adense)
        random.seed(1234)
        x = random.randn(3, 4)
        Bdense = As.dot(x)
        Bs = csc_matrix(Bdense)
        x2 = spsolve(As, Bs)
        assert_array_almost_equal(x, x2.toarray())

    @sup_sparse_efficiency
    def test_non_square(self):
        # A is not square.
        A = ones((3, 4))
        b = ones((4, 1))
        assert_raises(ValueError, spsolve, A, b)
        # A2 and b2 have incompatible shapes.
        A2 = csc_matrix(eye(3))
        b2 = array([1.0, 2.0])
        assert_raises(ValueError, spsolve, A2, b2)

    @sup_sparse_efficiency
    def test_example_comparison(self):
        row = array([0, 0, 1, 2, 2, 2])
        col = array([0, 2, 2, 0, 1, 2])
        data = array([1, 2, 3, -4, 5, 6])
        sM = csr_matrix((data, (row, col)), shape=(3, 3), dtype=float)
        M = sM.toarray()

        row = array([0, 0, 1, 1, 0, 0])
        col = array([0, 2, 1, 1, 0, 0])
        data = array([1, 1, 1, 1, 1, 1])
        sN = csr_matrix((data, (row, col)), shape=(3, 3), dtype=float)
        N = sN.toarray()

        sX = spsolve(sM, sN)
        X = scipy.linalg.solve(M, N)

        assert_array_almost_equal(X, sX.toarray())

    @sup_sparse_efficiency
    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_shape_compatibility(self):
        use_solver(useUmfpack=True)
        A = csc_matrix([[1., 0], [0, 2]])
        bs = [
            [1, 6],
            array([1, 6]),
            [[1], [6]],
            array([[1], [6]]),
            csc_matrix([[1], [6]]),
            csr_matrix([[1], [6]]),
            dok_matrix([[1], [6]]),
            bsr_matrix([[1], [6]]),
            array([[1., 2., 3.], [6., 8., 10.]]),
            csc_matrix([[1., 2., 3.], [6., 8., 10.]]),
            csr_matrix([[1., 2., 3.], [6., 8., 10.]]),
            dok_matrix([[1., 2., 3.], [6., 8., 10.]]),
            bsr_matrix([[1., 2., 3.], [6., 8., 10.]]),
        ]

        for b in bs:
            x = np.linalg.solve(A.toarray(), toarray(b))
            for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]:
                x1 = spsolve(spmattype(A), b, use_umfpack=True)
                x2 = spsolve(spmattype(A), b, use_umfpack=False)

                # check solution
                if x.ndim == 2 and x.shape[1] == 1:
                    # interprets also these as "vectors"
                    x = x.ravel()

                assert_array_almost_equal(toarray(x1), x,
                                          err_msg=repr((b, spmattype, 1)))
                assert_array_almost_equal(toarray(x2), x,
                                          err_msg=repr((b, spmattype, 2)))

                # dense vs. sparse output ("vectors" are always dense)
                if issparse(b) and x.ndim > 1:
                    assert_(issparse(x1), repr((b, spmattype, 1)))
                    assert_(issparse(x2), repr((b, spmattype, 2)))
                else:
                    assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1)))
                    assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2)))

                # check output shape
                if x.ndim == 1:
                    # "vector"
                    assert_equal(x1.shape, (A.shape[1],))
                    assert_equal(x2.shape, (A.shape[1],))
                else:
                    # "matrix"
                    assert_equal(x1.shape, x.shape)
                    assert_equal(x2.shape, x.shape)

        A = csc_matrix((3, 3))
        b = csc_matrix((1, 3))
        assert_raises(ValueError, spsolve, A, b)

    @sup_sparse_efficiency
    def test_ndarray_support(self):
        A = array([[1., 2.], [2., 0.]])
        x = array([[1., 1.], [0.5, -0.5]])
        b = array([[2., 0.], [2., 2.]])

        assert_array_almost_equal(x, spsolve(A, b))

    def test_gssv_badinput(self):
        N = 10
        d = arange(N) + 1.0
        A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N)

        for spmatrix in (csc_matrix, csr_matrix):
            A = spmatrix(A)
            b = np.arange(N)

            def not_c_contig(x):
                return x.repeat(2)[::2]

            def not_1dim(x):
                return x[:, None]

            def bad_type(x):
                return x.astype(bool)

            def too_short(x):
                return x[:-1]

            badops = [not_c_contig, not_1dim, bad_type, too_short]

            for badop in badops:
                msg = f"{spmatrix!r} {badop!r}"
                # Not C-contiguous
                assert_raises((ValueError, TypeError), _superlu.gssv,
                              N, A.nnz, badop(A.data), A.indices, A.indptr,
                              b, int(spmatrix == csc_matrix), err_msg=msg)
                assert_raises((ValueError, TypeError), _superlu.gssv,
                              N, A.nnz, A.data, badop(A.indices), A.indptr,
                              b, int(spmatrix == csc_matrix), err_msg=msg)
                assert_raises((ValueError, TypeError), _superlu.gssv,
                              N, A.nnz, A.data, A.indices, badop(A.indptr),
                              b, int(spmatrix == csc_matrix), err_msg=msg)

    def test_sparsity_preservation(self):
        ident = csc_matrix([
            [1, 0, 0],
            [0, 1, 0],
            [0, 0, 1]])
        b = csc_matrix([
            [0, 1],
            [1, 0],
            [0, 0]])
        x = spsolve(ident, b)
        assert_equal(ident.nnz, 3)
        assert_equal(b.nnz, 2)
        assert_equal(x.nnz, 2)
        assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12)

    def test_dtype_cast(self):
        A_real = scipy.sparse.csr_matrix([[1, 2, 0],
                                          [0, 0, 3],
                                          [4, 0, 5]])
        A_complex = scipy.sparse.csr_matrix([[1, 2, 0],
                                             [0, 0, 3],
                                             [4, 0, 5 + 1j]])
        b_real = np.array([1, 1, 1])
        b_complex = np.array([1, 1, 1]) + 1j*np.array([1, 1, 1])
        x = spsolve(A_real, b_real)
        assert_(np.issubdtype(x.dtype, np.floating))
        x = spsolve(A_real, b_complex)
        assert_(np.issubdtype(x.dtype, np.complexfloating))
        x = spsolve(A_complex, b_real)
        assert_(np.issubdtype(x.dtype, np.complexfloating))
        x = spsolve(A_complex, b_complex)
        assert_(np.issubdtype(x.dtype, np.complexfloating))

    @pytest.mark.slow
    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_bug_8278(self):
        check_free_memory(8000)
        use_solver(useUmfpack=True)
        A, b = setup_bug_8278()
        x = spsolve(A, b)
        assert_array_almost_equal(A @ x, b)
442
+
443
+
444
+ class TestSplu:
445
+ def setup_method(self):
446
+ use_solver(useUmfpack=False)
447
+ n = 40
448
+ d = arange(n) + 1
449
+ self.n = n
450
+ self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n, format='csc')
451
+ random.seed(1234)
452
+
453
+ def _smoketest(self, spxlu, check, dtype, idx_dtype):
454
+ if np.issubdtype(dtype, np.complexfloating):
455
+ A = self.A + 1j*self.A.T
456
+ else:
457
+ A = self.A
458
+
459
+ A = A.astype(dtype)
460
+ A.indices = A.indices.astype(idx_dtype, copy=False)
461
+ A.indptr = A.indptr.astype(idx_dtype, copy=False)
462
+ lu = spxlu(A)
463
+
464
+ rng = random.RandomState(1234)
465
+
466
+ # Input shapes
467
+ for k in [None, 1, 2, self.n, self.n+2]:
468
+ msg = f"k={k!r}"
469
+
470
+ if k is None:
471
+ b = rng.rand(self.n)
472
+ else:
473
+ b = rng.rand(self.n, k)
474
+
475
+ if np.issubdtype(dtype, np.complexfloating):
476
+ b = b + 1j*rng.rand(*b.shape)
477
+ b = b.astype(dtype)
478
+
479
+ x = lu.solve(b)
480
+ check(A, b, x, msg)
481
+
482
+ x = lu.solve(b, 'T')
483
+ check(A.T, b, x, msg)
484
+
485
+ x = lu.solve(b, 'H')
486
+ check(A.T.conj(), b, x, msg)
487
+
488
+ @sup_sparse_efficiency
489
+ def test_splu_smoketest(self):
490
+ self._internal_test_splu_smoketest()
491
+
492
+ def _internal_test_splu_smoketest(self):
493
+ # Check that splu works at all
494
+ def check(A, b, x, msg=""):
495
+ eps = np.finfo(A.dtype).eps
496
+ r = A @ x
497
+ assert_(abs(r - b).max() < 1e3*eps, msg)
498
+
499
+ for dtype in [np.float32, np.float64, np.complex64, np.complex128]:
500
+ for idx_dtype in [np.int32, np.int64]:
501
+ self._smoketest(splu, check, dtype, idx_dtype)
502
+
503
+ @sup_sparse_efficiency
504
+ def test_spilu_smoketest(self):
505
+ self._internal_test_spilu_smoketest()
506
+
507
+ def _internal_test_spilu_smoketest(self):
508
+ errors = []
509
+
510
+ def check(A, b, x, msg=""):
511
+ r = A @ x
512
+ err = abs(r - b).max()
513
+ assert_(err < 1e-2, msg)
514
+ if b.dtype in (np.float64, np.complex128):
515
+ errors.append(err)
516
+
517
+ for dtype in [np.float32, np.float64, np.complex64, np.complex128]:
518
+ for idx_dtype in [np.int32, np.int64]:
519
+ self._smoketest(spilu, check, dtype, idx_dtype)
520
+
521
+ assert_(max(errors) > 1e-5)
522
+
523
+ @sup_sparse_efficiency
524
+ def test_spilu_drop_rule(self):
525
+ # Test passing in the drop_rule argument to spilu.
526
+ A = identity(2)
527
+
528
+ rules = [
529
+ b'basic,area'.decode('ascii'), # unicode
530
+ b'basic,area', # ascii
531
+ [b'basic', b'area'.decode('ascii')]
532
+ ]
533
+ for rule in rules:
534
+ # Argument should be accepted
535
+ assert_(isinstance(spilu(A, drop_rule=rule), SuperLU))
536
+
537
+ def test_splu_nnz0(self):
538
+ A = csc_matrix((5,5), dtype='d')
539
+ assert_raises(RuntimeError, splu, A)
540
+
541
+ def test_spilu_nnz0(self):
542
+ A = csc_matrix((5,5), dtype='d')
543
+ assert_raises(RuntimeError, spilu, A)
544
+
545
+ def test_splu_basic(self):
546
+ # Test basic splu functionality.
547
+ n = 30
548
+ rng = random.RandomState(12)
549
+ a = rng.rand(n, n)
550
+ a[a < 0.95] = 0
551
+ # First test with a singular matrix
552
+ a[:, 0] = 0
553
+ a_ = csc_matrix(a)
554
+ # Matrix is exactly singular
555
+ assert_raises(RuntimeError, splu, a_)
556
+
557
+ # Make a diagonal dominant, to make sure it is not singular
558
+ a += 4*eye(n)
559
+ a_ = csc_matrix(a)
560
+ lu = splu(a_)
561
+ b = ones(n)
562
+ x = lu.solve(b)
563
+ assert_almost_equal(dot(a, x), b)
564
+
565
+ def test_splu_perm(self):
566
+ # Test the permutation vectors exposed by splu.
567
+ n = 30
568
+ a = random.random((n, n))
569
+ a[a < 0.95] = 0
570
+ # Make a diagonal dominant, to make sure it is not singular
571
+ a += 4*eye(n)
572
+ a_ = csc_matrix(a)
573
+ lu = splu(a_)
574
+ # Check that the permutation indices do belong to [0, n-1].
575
+ for perm in (lu.perm_r, lu.perm_c):
576
+ assert_(all(perm > -1))
577
+ assert_(all(perm < n))
578
+ assert_equal(len(unique(perm)), len(perm))
579
+
580
+ # Now make a symmetric, and test that the two permutation vectors are
581
+ # the same
582
+ # Note: a += a.T relies on undefined behavior.
583
+ a = a + a.T
584
+ a_ = csc_matrix(a)
585
+ lu = splu(a_)
586
+ assert_array_equal(lu.perm_r, lu.perm_c)
587
+
588
+ @pytest.mark.parametrize("splu_fun, rtol", [(splu, 1e-7), (spilu, 1e-1)])
589
+ def test_natural_permc(self, splu_fun, rtol):
590
+ # Test that the "NATURAL" permc_spec does not permute the matrix
591
+ np.random.seed(42)
592
+ n = 500
593
+ p = 0.01
594
+ A = scipy.sparse.random(n, n, p)
595
+ x = np.random.rand(n)
596
+ # Make A diagonal dominant to make sure it is not singular
597
+ A += (n+1)*scipy.sparse.identity(n)
598
+ A_ = csc_matrix(A)
599
+ b = A_ @ x
600
+
601
+ # without permc_spec, permutation is not identity
602
+ lu = splu_fun(A_)
603
+ assert_(np.any(lu.perm_c != np.arange(n)))
604
+
605
+ # with permc_spec="NATURAL", permutation is identity
606
+ lu = splu_fun(A_, permc_spec="NATURAL")
607
+ assert_array_equal(lu.perm_c, np.arange(n))
608
+
609
+ # Also, lu decomposition is valid
610
+ x2 = lu.solve(b)
611
+ assert_allclose(x, x2, rtol=rtol)
612
+
613
+ @pytest.mark.skipif(not hasattr(sys, 'getrefcount'), reason="no sys.getrefcount")
614
+ def test_lu_refcount(self):
615
+ # Test that we are keeping track of the reference count with splu.
616
+ n = 30
617
+ a = random.random((n, n))
618
+ a[a < 0.95] = 0
619
+ # Make a diagonal dominant, to make sure it is not singular
620
+ a += 4*eye(n)
621
+ a_ = csc_matrix(a)
622
+ lu = splu(a_)
623
+
624
+ # And now test that we don't have a refcount bug
625
+ rc = sys.getrefcount(lu)
626
+ for attr in ('perm_r', 'perm_c'):
627
+ perm = getattr(lu, attr)
628
+ assert_equal(sys.getrefcount(lu), rc + 1)
629
+ del perm
630
+ assert_equal(sys.getrefcount(lu), rc)
631
+
632
+ def test_bad_inputs(self):
633
+ A = self.A.tocsc()
634
+
635
+ assert_raises(ValueError, splu, A[:,:4])
636
+ assert_raises(ValueError, spilu, A[:,:4])
637
+
638
+ for lu in [splu(A), spilu(A)]:
639
+ b = random.rand(42)
640
+ B = random.rand(42, 3)
641
+ BB = random.rand(self.n, 3, 9)
642
+ assert_raises(ValueError, lu.solve, b)
643
+ assert_raises(ValueError, lu.solve, B)
644
+ assert_raises(ValueError, lu.solve, BB)
645
+ assert_raises(TypeError, lu.solve,
646
+ b.astype(np.complex64))
647
+ assert_raises(TypeError, lu.solve,
648
+ b.astype(np.complex128))
649
+
650
+ @sup_sparse_efficiency
651
+ def test_superlu_dlamch_i386_nan(self):
652
+ # SuperLU 4.3 calls some functions returning floats without
653
+ # declaring them. On i386@linux call convention, this fails to
654
+ # clear floating point registers after call. As a result, NaN
655
+ # can appear in the next floating point operation made.
656
+ #
657
+ # Here's a test case that triggered the issue.
658
+ n = 8
659
+ d = np.arange(n) + 1
660
+ A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
661
+ A = A.astype(np.float32)
662
+ spilu(A)
663
+ A = A + 1j*A
664
+ B = A.A
665
+ assert_(not np.isnan(B).any())
666
+
667
+ @sup_sparse_efficiency
668
+ def test_lu_attr(self):
669
+
670
+ def check(dtype, complex_2=False):
671
+ A = self.A.astype(dtype)
672
+
673
+ if complex_2:
674
+ A = A + 1j*A.T
675
+
676
+ n = A.shape[0]
677
+ lu = splu(A)
678
+
679
+ # Check that the decomposition is as advertised
680
+
681
+ Pc = np.zeros((n, n))
682
+ Pc[np.arange(n), lu.perm_c] = 1
683
+
684
+ Pr = np.zeros((n, n))
685
+ Pr[lu.perm_r, np.arange(n)] = 1
686
+
687
+ Ad = A.toarray()
688
+ lhs = Pr.dot(Ad).dot(Pc)
689
+ rhs = (lu.L @ lu.U).toarray()
690
+
691
+ eps = np.finfo(dtype).eps
692
+
693
+ assert_allclose(lhs, rhs, atol=100*eps)
694
+
695
+ check(np.float32)
696
+ check(np.float64)
697
+ check(np.complex64)
698
+ check(np.complex128)
699
+ check(np.complex64, True)
700
+ check(np.complex128, True)
701
+
702
+ @pytest.mark.slow
703
+ @sup_sparse_efficiency
704
+ def test_threads_parallel(self):
705
+ oks = []
706
+
707
+ def worker():
708
+ try:
709
+ self.test_splu_basic()
710
+ self._internal_test_splu_smoketest()
711
+ self._internal_test_spilu_smoketest()
712
+ oks.append(True)
713
+ except Exception:
714
+ pass
715
+
716
+ threads = [threading.Thread(target=worker)
717
+ for k in range(20)]
718
+ for t in threads:
719
+ t.start()
720
+ for t in threads:
721
+ t.join()
722
+
723
+ assert_equal(len(oks), 20)
724
+
725
+
726
+ class TestSpsolveTriangular:
727
+ def setup_method(self):
728
+ use_solver(useUmfpack=False)
729
+
730
+ def test_zero_diagonal(self):
731
+ n = 5
732
+ rng = np.random.default_rng(43876432987)
733
+ A = rng.standard_normal((n, n))
734
+ b = np.arange(n)
735
+ A = scipy.sparse.tril(A, k=0, format='csr')
736
+
737
+ x = spsolve_triangular(A, b, unit_diagonal=True, lower=True)
738
+
739
+ A.setdiag(1)
740
+ assert_allclose(A.dot(x), b)
741
+
742
+ # Regression test from gh-15199
743
+ A = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]], dtype=np.float64)
744
+ b = np.array([1., 2., 3.])
745
+ with suppress_warnings() as sup:
746
+ sup.filter(SparseEfficiencyWarning, "CSR matrix format is")
747
+ spsolve_triangular(A, b, unit_diagonal=True)
748
+
749
+ def test_singular(self):
750
+ n = 5
751
+ A = csr_matrix((n, n))
752
+ b = np.arange(n)
753
+ for lower in (True, False):
754
+ assert_raises(scipy.linalg.LinAlgError,
755
+ spsolve_triangular, A, b, lower=lower)
756
+
757
+ @sup_sparse_efficiency
758
+ def test_bad_shape(self):
759
+ # A is not square.
760
+ A = np.zeros((3, 4))
761
+ b = ones((4, 1))
762
+ assert_raises(ValueError, spsolve_triangular, A, b)
763
+ # A2 and b2 have incompatible shapes.
764
+ A2 = csr_matrix(eye(3))
765
+ b2 = array([1.0, 2.0])
766
+ assert_raises(ValueError, spsolve_triangular, A2, b2)
767
+
768
+ @sup_sparse_efficiency
769
+ def test_input_types(self):
770
+ A = array([[1., 0.], [1., 2.]])
771
+ b = array([[2., 0.], [2., 2.]])
772
+ for matrix_type in (array, csc_matrix, csr_matrix):
773
+ x = spsolve_triangular(matrix_type(A), b, lower=True)
774
+ assert_array_almost_equal(A.dot(x), b)
775
+
776
+ @pytest.mark.slow
777
+ @pytest.mark.timeout(120) # prerelease_deps_coverage_64bit_blas job
778
+ @sup_sparse_efficiency
779
+ def test_random(self):
780
+ def random_triangle_matrix(n, lower=True):
781
+ A = scipy.sparse.random(n, n, density=0.1, format='coo')
782
+ if lower:
783
+ A = scipy.sparse.tril(A)
784
+ else:
785
+ A = scipy.sparse.triu(A)
786
+ A = A.tocsr(copy=False)
787
+ for i in range(n):
788
+ A[i, i] = np.random.rand() + 1
789
+ return A
790
+
791
+ np.random.seed(1234)
792
+ for lower in (True, False):
793
+ for n in (10, 10**2, 10**3):
794
+ A = random_triangle_matrix(n, lower=lower)
795
+ for m in (1, 10):
796
+ for b in (np.random.rand(n, m),
797
+ np.random.randint(-9, 9, (n, m)),
798
+ np.random.randint(-9, 9, (n, m)) +
799
+ np.random.randint(-9, 9, (n, m)) * 1j):
800
+ x = spsolve_triangular(A, b, lower=lower)
801
+ assert_array_almost_equal(A.dot(x), b)
802
+ x = spsolve_triangular(A, b, lower=lower,
803
+ unit_diagonal=True)
804
+ A.setdiag(1)
805
+ assert_array_almost_equal(A.dot(x), b)
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__init__.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Sparse Eigenvalue Solvers
3
+ -------------------------
4
+
5
+ The submodules of sparse.linalg._eigen:
6
+ 1. lobpcg: Locally Optimal Block Preconditioned Conjugate Gradient Method
7
+
8
+ """
9
+ from .arpack import *
10
+ from .lobpcg import *
11
+ from ._svds import svds
12
+
13
+ from . import arpack
14
+
15
+ __all__ = [
16
+ 'ArpackError', 'ArpackNoConvergence',
17
+ 'eigs', 'eigsh', 'lobpcg', 'svds'
18
+ ]
19
+
20
+ from scipy._lib._testutils import PytestTester
21
+ test = PytestTester(__name__)
22
+ del PytestTester
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (658 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/_svds.cpython-310.pyc ADDED
Binary file (17.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/_svds_doc.cpython-310.pyc ADDED
Binary file (15.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/_svds_doc.py ADDED
@@ -0,0 +1,400 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ def _svds_arpack_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
2
+ maxiter=None, return_singular_vectors=True,
3
+ solver='arpack', random_state=None):
4
+ """
5
+ Partial singular value decomposition of a sparse matrix using ARPACK.
6
+
7
+ Compute the largest or smallest `k` singular values and corresponding
8
+ singular vectors of a sparse matrix `A`. The order in which the singular
9
+ values are returned is not guaranteed.
10
+
11
+ In the descriptions below, let ``M, N = A.shape``.
12
+
13
+ Parameters
14
+ ----------
15
+ A : sparse matrix or LinearOperator
16
+ Matrix to decompose.
17
+ k : int, optional
18
+ Number of singular values and singular vectors to compute.
19
+ Must satisfy ``1 <= k <= min(M, N) - 1``.
20
+ Default is 6.
21
+ ncv : int, optional
22
+ The number of Lanczos vectors generated.
23
+ The default is ``min(n, max(2*k + 1, 20))``.
24
+ If specified, must satistify ``k + 1 < ncv < min(M, N)``; ``ncv > 2*k``
25
+ is recommended.
26
+ tol : float, optional
27
+ Tolerance for singular values. Zero (default) means machine precision.
28
+ which : {'LM', 'SM'}
29
+ Which `k` singular values to find: either the largest magnitude ('LM')
30
+ or smallest magnitude ('SM') singular values.
31
+ v0 : ndarray, optional
32
+ The starting vector for iteration:
33
+ an (approximate) left singular vector if ``N > M`` and a right singular
34
+ vector otherwise. Must be of length ``min(M, N)``.
35
+ Default: random
36
+ maxiter : int, optional
37
+ Maximum number of Arnoldi update iterations allowed;
38
+ default is ``min(M, N) * 10``.
39
+ return_singular_vectors : {True, False, "u", "vh"}
40
+ Singular values are always computed and returned; this parameter
41
+ controls the computation and return of singular vectors.
42
+
43
+ - ``True``: return singular vectors.
44
+ - ``False``: do not return singular vectors.
45
+ - ``"u"``: if ``M <= N``, compute only the left singular vectors and
46
+ return ``None`` for the right singular vectors. Otherwise, compute
47
+ all singular vectors.
48
+ - ``"vh"``: if ``M > N``, compute only the right singular vectors and
49
+ return ``None`` for the left singular vectors. Otherwise, compute
50
+ all singular vectors.
51
+
52
+ solver : {'arpack', 'propack', 'lobpcg'}, optional
53
+ This is the solver-specific documentation for ``solver='arpack'``.
54
+ :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>` and
55
+ :ref:`'propack' <sparse.linalg.svds-propack>`
56
+ are also supported.
57
+ random_state : {None, int, `numpy.random.Generator`,
58
+ `numpy.random.RandomState`}, optional
59
+
60
+ Pseudorandom number generator state used to generate resamples.
61
+
62
+ If `random_state` is ``None`` (or `np.random`), the
63
+ `numpy.random.RandomState` singleton is used.
64
+ If `random_state` is an int, a new ``RandomState`` instance is used,
65
+ seeded with `random_state`.
66
+ If `random_state` is already a ``Generator`` or ``RandomState``
67
+ instance then that instance is used.
68
+ options : dict, optional
69
+ A dictionary of solver-specific options. No solver-specific options
70
+ are currently supported; this parameter is reserved for future use.
71
+
72
+ Returns
73
+ -------
74
+ u : ndarray, shape=(M, k)
75
+ Unitary matrix having left singular vectors as columns.
76
+ s : ndarray, shape=(k,)
77
+ The singular values.
78
+ vh : ndarray, shape=(k, N)
79
+ Unitary matrix having right singular vectors as rows.
80
+
81
+ Notes
82
+ -----
83
+ This is a naive implementation using ARPACK as an eigensolver
84
+ on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
85
+ efficient.
86
+
87
+ Examples
88
+ --------
89
+ Construct a matrix ``A`` from singular values and vectors.
90
+
91
+ >>> import numpy as np
92
+ >>> from scipy.stats import ortho_group
93
+ >>> from scipy.sparse import csc_matrix, diags
94
+ >>> from scipy.sparse.linalg import svds
95
+ >>> rng = np.random.default_rng()
96
+ >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
97
+ >>> s = [0.0001, 0.001, 3, 4, 5] # singular values
98
+ >>> u = orthogonal[:, :5] # left singular vectors
99
+ >>> vT = orthogonal[:, 5:].T # right singular vectors
100
+ >>> A = u @ diags(s) @ vT
101
+
102
+ With only three singular values/vectors, the SVD approximates the original
103
+ matrix.
104
+
105
+ >>> u2, s2, vT2 = svds(A, k=3, solver='arpack')
106
+ >>> A2 = u2 @ np.diag(s2) @ vT2
107
+ >>> np.allclose(A2, A.toarray(), atol=1e-3)
108
+ True
109
+
110
+ With all five singular values/vectors, we can reproduce the original
111
+ matrix.
112
+
113
+ >>> u3, s3, vT3 = svds(A, k=5, solver='arpack')
114
+ >>> A3 = u3 @ np.diag(s3) @ vT3
115
+ >>> np.allclose(A3, A.toarray())
116
+ True
117
+
118
+ The singular values match the expected singular values, and the singular
119
+ vectors are as expected up to a difference in sign.
120
+
121
+ >>> (np.allclose(s3, s) and
122
+ ... np.allclose(np.abs(u3), np.abs(u.toarray())) and
123
+ ... np.allclose(np.abs(vT3), np.abs(vT.toarray())))
124
+ True
125
+
126
+ The singular vectors are also orthogonal.
127
+
128
+ >>> (np.allclose(u3.T @ u3, np.eye(5)) and
129
+ ... np.allclose(vT3 @ vT3.T, np.eye(5)))
130
+ True
131
+ """
132
+ pass
133
+
134
+
135
+ def _svds_lobpcg_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
136
+ maxiter=None, return_singular_vectors=True,
137
+ solver='lobpcg', random_state=None):
138
+ """
139
+ Partial singular value decomposition of a sparse matrix using LOBPCG.
140
+
141
+ Compute the largest or smallest `k` singular values and corresponding
142
+ singular vectors of a sparse matrix `A`. The order in which the singular
143
+ values are returned is not guaranteed.
144
+
145
+ In the descriptions below, let ``M, N = A.shape``.
146
+
147
+ Parameters
148
+ ----------
149
+ A : sparse matrix or LinearOperator
150
+ Matrix to decompose.
151
+ k : int, default: 6
152
+ Number of singular values and singular vectors to compute.
153
+ Must satisfy ``1 <= k <= min(M, N) - 1``.
154
+ ncv : int, optional
155
+ Ignored.
156
+ tol : float, optional
157
+ Tolerance for singular values. Zero (default) means machine precision.
158
+ which : {'LM', 'SM'}
159
+ Which `k` singular values to find: either the largest magnitude ('LM')
160
+ or smallest magnitude ('SM') singular values.
161
+ v0 : ndarray, optional
162
+ If `k` is 1, the starting vector for iteration:
163
+ an (approximate) left singular vector if ``N > M`` and a right singular
164
+ vector otherwise. Must be of length ``min(M, N)``.
165
+ Ignored otherwise.
166
+ Default: random
167
+ maxiter : int, default: 20
168
+ Maximum number of iterations.
169
+ return_singular_vectors : {True, False, "u", "vh"}
170
+ Singular values are always computed and returned; this parameter
171
+ controls the computation and return of singular vectors.
172
+
173
+ - ``True``: return singular vectors.
174
+ - ``False``: do not return singular vectors.
175
+ - ``"u"``: if ``M <= N``, compute only the left singular vectors and
176
+ return ``None`` for the right singular vectors. Otherwise, compute
177
+ all singular vectors.
178
+ - ``"vh"``: if ``M > N``, compute only the right singular vectors and
179
+ return ``None`` for the left singular vectors. Otherwise, compute
180
+ all singular vectors.
181
+
182
+ solver : {'arpack', 'propack', 'lobpcg'}, optional
183
+ This is the solver-specific documentation for ``solver='lobpcg'``.
184
+ :ref:`'arpack' <sparse.linalg.svds-arpack>` and
185
+ :ref:`'propack' <sparse.linalg.svds-propack>`
186
+ are also supported.
187
+ random_state : {None, int, `numpy.random.Generator`,
188
+ `numpy.random.RandomState`}, optional
189
+
190
+ Pseudorandom number generator state used to generate resamples.
191
+
192
+ If `random_state` is ``None`` (or `np.random`), the
193
+ `numpy.random.RandomState` singleton is used.
194
+ If `random_state` is an int, a new ``RandomState`` instance is used,
195
+ seeded with `random_state`.
196
+ If `random_state` is already a ``Generator`` or ``RandomState``
197
+ instance then that instance is used.
198
+ options : dict, optional
199
+ A dictionary of solver-specific options. No solver-specific options
200
+ are currently supported; this parameter is reserved for future use.
201
+
202
+ Returns
203
+ -------
204
+ u : ndarray, shape=(M, k)
205
+ Unitary matrix having left singular vectors as columns.
206
+ s : ndarray, shape=(k,)
207
+ The singular values.
208
+ vh : ndarray, shape=(k, N)
209
+ Unitary matrix having right singular vectors as rows.
210
+
211
+ Notes
212
+ -----
213
+ This is a naive implementation using LOBPCG as an eigensolver
214
+ on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
215
+ efficient.
216
+
217
+ Examples
218
+ --------
219
+ Construct a matrix ``A`` from singular values and vectors.
220
+
221
+ >>> import numpy as np
222
+ >>> from scipy.stats import ortho_group
223
+ >>> from scipy.sparse import csc_matrix, diags
224
+ >>> from scipy.sparse.linalg import svds
225
+ >>> rng = np.random.default_rng()
226
+ >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
227
+ >>> s = [0.0001, 0.001, 3, 4, 5] # singular values
228
+ >>> u = orthogonal[:, :5] # left singular vectors
229
+ >>> vT = orthogonal[:, 5:].T # right singular vectors
230
+ >>> A = u @ diags(s) @ vT
231
+
232
+ With only three singular values/vectors, the SVD approximates the original
233
+ matrix.
234
+
235
+ >>> u2, s2, vT2 = svds(A, k=3, solver='lobpcg')
236
+ >>> A2 = u2 @ np.diag(s2) @ vT2
237
+ >>> np.allclose(A2, A.toarray(), atol=1e-3)
238
+ True
239
+
240
+ With all five singular values/vectors, we can reproduce the original
241
+ matrix.
242
+
243
+ >>> u3, s3, vT3 = svds(A, k=5, solver='lobpcg')
244
+ >>> A3 = u3 @ np.diag(s3) @ vT3
245
+ >>> np.allclose(A3, A.toarray())
246
+ True
247
+
248
+ The singular values match the expected singular values, and the singular
249
+ vectors are as expected up to a difference in sign.
250
+
251
+ >>> (np.allclose(s3, s) and
252
+ ... np.allclose(np.abs(u3), np.abs(u.todense())) and
253
+ ... np.allclose(np.abs(vT3), np.abs(vT.todense())))
254
+ True
255
+
256
+ The singular vectors are also orthogonal.
257
+
258
+ >>> (np.allclose(u3.T @ u3, np.eye(5)) and
259
+ ... np.allclose(vT3 @ vT3.T, np.eye(5)))
260
+ True
261
+
262
+ """
263
+ pass
264
+
265
+
266
+ def _svds_propack_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
267
+ maxiter=None, return_singular_vectors=True,
268
+ solver='propack', random_state=None):
269
+ """
270
+ Partial singular value decomposition of a sparse matrix using PROPACK.
271
+
272
+ Compute the largest or smallest `k` singular values and corresponding
273
+ singular vectors of a sparse matrix `A`. The order in which the singular
274
+ values are returned is not guaranteed.
275
+
276
+ In the descriptions below, let ``M, N = A.shape``.
277
+
278
+ Parameters
279
+ ----------
280
+ A : sparse matrix or LinearOperator
281
+ Matrix to decompose. If `A` is a ``LinearOperator``
282
+ object, it must define both ``matvec`` and ``rmatvec`` methods.
283
+ k : int, default: 6
284
+ Number of singular values and singular vectors to compute.
285
+ Must satisfy ``1 <= k <= min(M, N)``.
286
+ ncv : int, optional
287
+ Ignored.
288
+ tol : float, optional
289
+ The desired relative accuracy for computed singular values.
290
+ Zero (default) means machine precision.
291
+ which : {'LM', 'SM'}
292
+ Which `k` singular values to find: either the largest magnitude ('LM')
293
+ or smallest magnitude ('SM') singular values. Note that choosing
294
+ ``which='SM'`` will force the ``irl`` option to be set ``True``.
295
+ v0 : ndarray, optional
296
+ Starting vector for iterations: must be of length ``A.shape[0]``.
297
+ If not specified, PROPACK will generate a starting vector.
298
+ maxiter : int, optional
299
+ Maximum number of iterations / maximal dimension of the Krylov
300
+ subspace. Default is ``10 * k``.
301
+ return_singular_vectors : {True, False, "u", "vh"}
302
+ Singular values are always computed and returned; this parameter
303
+ controls the computation and return of singular vectors.
304
+
305
+ - ``True``: return singular vectors.
306
+ - ``False``: do not return singular vectors.
307
+ - ``"u"``: compute only the left singular vectors; return ``None`` for
308
+ the right singular vectors.
309
+ - ``"vh"``: compute only the right singular vectors; return ``None``
310
+ for the left singular vectors.
311
+
312
+ solver : {'arpack', 'propack', 'lobpcg'}, optional
313
+ This is the solver-specific documentation for ``solver='propack'``.
314
+ :ref:`'arpack' <sparse.linalg.svds-arpack>` and
315
+ :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`
316
+ are also supported.
317
+ random_state : {None, int, `numpy.random.Generator`,
318
+ `numpy.random.RandomState`}, optional
319
+
320
+ Pseudorandom number generator state used to generate resamples.
321
+
322
+ If `random_state` is ``None`` (or `np.random`), the
323
+ `numpy.random.RandomState` singleton is used.
324
+ If `random_state` is an int, a new ``RandomState`` instance is used,
325
+ seeded with `random_state`.
326
+ If `random_state` is already a ``Generator`` or ``RandomState``
327
+ instance then that instance is used.
328
+ options : dict, optional
329
+ A dictionary of solver-specific options. No solver-specific options
330
+ are currently supported; this parameter is reserved for future use.
331
+
332
+ Returns
333
+ -------
334
+ u : ndarray, shape=(M, k)
335
+ Unitary matrix having left singular vectors as columns.
336
+ s : ndarray, shape=(k,)
337
+ The singular values.
338
+ vh : ndarray, shape=(k, N)
339
+ Unitary matrix having right singular vectors as rows.
340
+
341
+ Notes
342
+ -----
343
+ This is an interface to the Fortran library PROPACK [1]_.
344
+ The current default is to run with IRL mode disabled unless seeking the
345
+ smallest singular values/vectors (``which='SM'``).
346
+
347
+ References
348
+ ----------
349
+
350
+ .. [1] Larsen, Rasmus Munk. "PROPACK-Software for large and sparse SVD
351
+ calculations." Available online. URL
352
+ http://sun.stanford.edu/~rmunk/PROPACK (2004): 2008-2009.
353
+
354
+ Examples
355
+ --------
356
+ Construct a matrix ``A`` from singular values and vectors.
357
+
358
+ >>> import numpy as np
359
+ >>> from scipy.stats import ortho_group
360
+ >>> from scipy.sparse import csc_matrix, diags
361
+ >>> from scipy.sparse.linalg import svds
362
+ >>> rng = np.random.default_rng()
363
+ >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
364
+ >>> s = [0.0001, 0.001, 3, 4, 5] # singular values
365
+ >>> u = orthogonal[:, :5] # left singular vectors
366
+ >>> vT = orthogonal[:, 5:].T # right singular vectors
367
+ >>> A = u @ diags(s) @ vT
368
+
369
+ With only three singular values/vectors, the SVD approximates the original
370
+ matrix.
371
+
372
+ >>> u2, s2, vT2 = svds(A, k=3, solver='propack')
373
+ >>> A2 = u2 @ np.diag(s2) @ vT2
374
+ >>> np.allclose(A2, A.todense(), atol=1e-3)
375
+ True
376
+
377
+ With all five singular values/vectors, we can reproduce the original
378
+ matrix.
379
+
380
+ >>> u3, s3, vT3 = svds(A, k=5, solver='propack')
381
+ >>> A3 = u3 @ np.diag(s3) @ vT3
382
+ >>> np.allclose(A3, A.todense())
383
+ True
384
+
385
+ The singular values match the expected singular values, and the singular
386
+ vectors are as expected up to a difference in sign.
387
+
388
+ >>> (np.allclose(s3, s) and
389
+ ... np.allclose(np.abs(u3), np.abs(u.toarray())) and
390
+ ... np.allclose(np.abs(vT3), np.abs(vT.toarray())))
391
+ True
392
+
393
+ The singular vectors are also orthogonal.
394
+
395
+ >>> (np.allclose(u3.T @ u3, np.eye(5)) and
396
+ ... np.allclose(vT3 @ vT3.T, np.eye(5)))
397
+ True
398
+
399
+ """
400
+ pass
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/COPYING ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ BSD Software License
3
+
4
+ Pertains to ARPACK and P_ARPACK
5
+
6
+ Copyright (c) 1996-2008 Rice University.
7
+ Developed by D.C. Sorensen, R.B. Lehoucq, C. Yang, and K. Maschhoff.
8
+ All rights reserved.
9
+
10
+ Arpack has been renamed to arpack-ng.
11
+
12
+ Copyright (c) 2001-2011 - Scilab Enterprises
13
+ Updated by Allan Cornet, Sylvestre Ledru.
14
+
15
+ Copyright (c) 2010 - Jordi Gutiérrez Hermoso (Octave patch)
16
+
17
+ Copyright (c) 2007 - Sébastien Fabbro (gentoo patch)
18
+
19
+ Redistribution and use in source and binary forms, with or without
20
+ modification, are permitted provided that the following conditions are
21
+ met:
22
+
23
+ - Redistributions of source code must retain the above copyright
24
+ notice, this list of conditions and the following disclaimer.
25
+
26
+ - Redistributions in binary form must reproduce the above copyright
27
+ notice, this list of conditions and the following disclaimer listed
28
+ in this license in the documentation and/or other materials
29
+ provided with the distribution.
30
+
31
+ - Neither the name of the copyright holders nor the names of its
32
+ contributors may be used to endorse or promote products derived from
33
+ this software without specific prior written permission.
34
+
35
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__init__.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Eigenvalue solver using iterative methods.
3
+
4
+ Find k eigenvectors and eigenvalues of a matrix A using the
5
+ Arnoldi/Lanczos iterative methods from ARPACK [1]_,[2]_.
6
+
7
+ These methods are most useful for large sparse matrices.
8
+
9
+ - eigs(A,k)
10
+ - eigsh(A,k)
11
+
12
+ References
13
+ ----------
14
+ .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
15
+ .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
16
+ Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
17
+ Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
18
+
19
+ """
20
+ from .arpack import *
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (782 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/arpack.cpython-310.pyc ADDED
Binary file (44.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (486 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/arpack.py ADDED
@@ -0,0 +1,1702 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Find a few eigenvectors and eigenvalues of a matrix.
3
+
4
+
5
+ Uses ARPACK: https://github.com/opencollab/arpack-ng
6
+
7
+ """
8
+ # Wrapper implementation notes
9
+ #
10
+ # ARPACK Entry Points
11
+ # -------------------
12
+ # The entry points to ARPACK are
13
+ # - (s,d)seupd : single and double precision symmetric matrix
14
+ # - (s,d,c,z)neupd: single,double,complex,double complex general matrix
15
+ # This wrapper puts the *neupd (general matrix) interfaces in eigs()
16
+ # and the *seupd (symmetric matrix) in eigsh().
17
+ # There is no specialized interface for complex Hermitian matrices.
18
+ # To find eigenvalues of a complex Hermitian matrix you
19
+ # may use eigsh(), but eigsh() will simply call eigs()
20
+ # and return the real part of the eigenvalues thus obtained.
21
+
22
+ # Number of eigenvalues returned and complex eigenvalues
23
+ # ------------------------------------------------------
24
+ # The ARPACK nonsymmetric real and double interface (s,d)naupd return
25
+ # eigenvalues and eigenvectors in real (float,double) arrays.
26
+ # Since the eigenvalues and eigenvectors are, in general, complex
27
+ # ARPACK puts the real and imaginary parts in consecutive entries
28
+ # in real-valued arrays. This wrapper puts the real entries
29
+ # into complex data types and attempts to return the requested eigenvalues
30
+ # and eigenvectors.
31
+
32
+
33
+ # Solver modes
34
+ # ------------
35
+ # ARPACK and handle shifted and shift-inverse computations
36
+ # for eigenvalues by providing a shift (sigma) and a solver.
37
+
38
+ import numpy as np
39
+ import warnings
40
+ from scipy.sparse.linalg._interface import aslinearoperator, LinearOperator
41
+ from scipy.sparse import eye, issparse
42
+ from scipy.linalg import eig, eigh, lu_factor, lu_solve
43
+ from scipy.sparse._sputils import isdense, is_pydata_spmatrix
44
+ from scipy.sparse.linalg import gmres, splu
45
+ from scipy._lib._util import _aligned_zeros
46
+ from scipy._lib._threadsafety import ReentrancyLock
47
+
48
+ from . import _arpack
49
+ arpack_int = _arpack.timing.nbx.dtype
50
+
51
+ __docformat__ = "restructuredtext en"
52
+
53
+ __all__ = ['eigs', 'eigsh', 'ArpackError', 'ArpackNoConvergence']
54
+
55
+
56
+ _type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
57
+ _ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
58
+
59
+ DNAUPD_ERRORS = {
60
+ 0: "Normal exit.",
61
+ 1: "Maximum number of iterations taken. "
62
+ "All possible eigenvalues of OP has been found. IPARAM(5) "
63
+ "returns the number of wanted converged Ritz values.",
64
+ 2: "No longer an informational error. Deprecated starting "
65
+ "with release 2 of ARPACK.",
66
+ 3: "No shifts could be applied during a cycle of the "
67
+ "Implicitly restarted Arnoldi iteration. One possibility "
68
+ "is to increase the size of NCV relative to NEV. ",
69
+ -1: "N must be positive.",
70
+ -2: "NEV must be positive.",
71
+ -3: "NCV-NEV >= 2 and less than or equal to N.",
72
+ -4: "The maximum number of Arnoldi update iterations allowed "
73
+ "must be greater than zero.",
74
+ -5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
75
+ -6: "BMAT must be one of 'I' or 'G'.",
76
+ -7: "Length of private work array WORKL is not sufficient.",
77
+ -8: "Error return from LAPACK eigenvalue calculation;",
78
+ -9: "Starting vector is zero.",
79
+ -10: "IPARAM(7) must be 1,2,3,4.",
80
+ -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
81
+ -12: "IPARAM(1) must be equal to 0 or 1.",
82
+ -13: "NEV and WHICH = 'BE' are incompatible.",
83
+ -9999: "Could not build an Arnoldi factorization. "
84
+ "IPARAM(5) returns the size of the current Arnoldi "
85
+ "factorization. The user is advised to check that "
86
+ "enough workspace and array storage has been allocated."
87
+ }
88
+
89
+ SNAUPD_ERRORS = DNAUPD_ERRORS
90
+
91
+ ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
92
+ ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
93
+
94
+ CNAUPD_ERRORS = ZNAUPD_ERRORS
95
+
96
+ DSAUPD_ERRORS = {
97
+ 0: "Normal exit.",
98
+ 1: "Maximum number of iterations taken. "
99
+ "All possible eigenvalues of OP has been found.",
100
+ 2: "No longer an informational error. Deprecated starting with "
101
+ "release 2 of ARPACK.",
102
+ 3: "No shifts could be applied during a cycle of the Implicitly "
103
+ "restarted Arnoldi iteration. One possibility is to increase "
104
+ "the size of NCV relative to NEV. ",
105
+ -1: "N must be positive.",
106
+ -2: "NEV must be positive.",
107
+ -3: "NCV must be greater than NEV and less than or equal to N.",
108
+ -4: "The maximum number of Arnoldi update iterations allowed "
109
+ "must be greater than zero.",
110
+ -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
111
+ -6: "BMAT must be one of 'I' or 'G'.",
112
+ -7: "Length of private work array WORKL is not sufficient.",
113
+ -8: "Error return from trid. eigenvalue calculation; "
114
+ "Informational error from LAPACK routine dsteqr .",
115
+ -9: "Starting vector is zero.",
116
+ -10: "IPARAM(7) must be 1,2,3,4,5.",
117
+ -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
118
+ -12: "IPARAM(1) must be equal to 0 or 1.",
119
+ -13: "NEV and WHICH = 'BE' are incompatible. ",
120
+ -9999: "Could not build an Arnoldi factorization. "
121
+ "IPARAM(5) returns the size of the current Arnoldi "
122
+ "factorization. The user is advised to check that "
123
+ "enough workspace and array storage has been allocated.",
124
+ }
125
+
126
+ SSAUPD_ERRORS = DSAUPD_ERRORS
127
+
128
+ DNEUPD_ERRORS = {
129
+ 0: "Normal exit.",
130
+ 1: "The Schur form computed by LAPACK routine dlahqr "
131
+ "could not be reordered by LAPACK routine dtrsen. "
132
+ "Re-enter subroutine dneupd with IPARAM(5)NCV and "
133
+ "increase the size of the arrays DR and DI to have "
134
+ "dimension at least dimension NCV and allocate at least NCV "
135
+ "columns for Z. NOTE: Not necessary if Z and V share "
136
+ "the same space. Please notify the authors if this error"
137
+ "occurs.",
138
+ -1: "N must be positive.",
139
+ -2: "NEV must be positive.",
140
+ -3: "NCV-NEV >= 2 and less than or equal to N.",
141
+ -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
142
+ -6: "BMAT must be one of 'I' or 'G'.",
143
+ -7: "Length of private work WORKL array is not sufficient.",
144
+ -8: "Error return from calculation of a real Schur form. "
145
+ "Informational error from LAPACK routine dlahqr .",
146
+ -9: "Error return from calculation of eigenvectors. "
147
+ "Informational error from LAPACK routine dtrevc.",
148
+ -10: "IPARAM(7) must be 1,2,3,4.",
149
+ -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
150
+ -12: "HOWMNY = 'S' not yet implemented",
151
+ -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
152
+ -14: "DNAUPD did not find any eigenvalues to sufficient "
153
+ "accuracy.",
154
+ -15: "DNEUPD got a different count of the number of converged "
155
+ "Ritz values than DNAUPD got. This indicates the user "
156
+ "probably made an error in passing data from DNAUPD to "
157
+ "DNEUPD or that the data was modified before entering "
158
+ "DNEUPD",
159
+ }
160
+
161
+ SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
162
+ SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
163
+ "could not be reordered by LAPACK routine strsen . "
164
+ "Re-enter subroutine dneupd with IPARAM(5)=NCV and "
165
+ "increase the size of the arrays DR and DI to have "
166
+ "dimension at least dimension NCV and allocate at least "
167
+ "NCV columns for Z. NOTE: Not necessary if Z and V share "
168
+ "the same space. Please notify the authors if this error "
169
+ "occurs.")
170
+ SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
171
+ "accuracy.")
172
+ SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
173
+ "converged Ritz values than SNAUPD got. This indicates "
174
+ "the user probably made an error in passing data from "
175
+ "SNAUPD to SNEUPD or that the data was modified before "
176
+ "entering SNEUPD")
177
+
178
+ ZNEUPD_ERRORS = {0: "Normal exit.",
179
+ 1: "The Schur form computed by LAPACK routine csheqr "
180
+ "could not be reordered by LAPACK routine ztrsen. "
181
+ "Re-enter subroutine zneupd with IPARAM(5)=NCV and "
182
+ "increase the size of the array D to have "
183
+ "dimension at least dimension NCV and allocate at least "
184
+ "NCV columns for Z. NOTE: Not necessary if Z and V share "
185
+ "the same space. Please notify the authors if this error "
186
+ "occurs.",
187
+ -1: "N must be positive.",
188
+ -2: "NEV must be positive.",
189
+ -3: "NCV-NEV >= 1 and less than or equal to N.",
190
+ -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
191
+ -6: "BMAT must be one of 'I' or 'G'.",
192
+ -7: "Length of private work WORKL array is not sufficient.",
193
+ -8: "Error return from LAPACK eigenvalue calculation. "
194
+ "This should never happened.",
195
+ -9: "Error return from calculation of eigenvectors. "
196
+ "Informational error from LAPACK routine ztrevc.",
197
+ -10: "IPARAM(7) must be 1,2,3",
198
+ -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
199
+ -12: "HOWMNY = 'S' not yet implemented",
200
+ -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
201
+ -14: "ZNAUPD did not find any eigenvalues to sufficient "
202
+ "accuracy.",
203
+ -15: "ZNEUPD got a different count of the number of "
204
+ "converged Ritz values than ZNAUPD got. This "
205
+ "indicates the user probably made an error in passing "
206
+ "data from ZNAUPD to ZNEUPD or that the data was "
207
+ "modified before entering ZNEUPD"
208
+ }
209
+
210
+ CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
211
+ CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
212
+ "accuracy.")
213
+ CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
214
+ "converged Ritz values than CNAUPD got. This indicates "
215
+ "the user probably made an error in passing data from "
216
+ "CNAUPD to CNEUPD or that the data was modified before "
217
+ "entering CNEUPD")
218
+
219
+ DSEUPD_ERRORS = {
220
+ 0: "Normal exit.",
221
+ -1: "N must be positive.",
222
+ -2: "NEV must be positive.",
223
+ -3: "NCV must be greater than NEV and less than or equal to N.",
224
+ -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
225
+ -6: "BMAT must be one of 'I' or 'G'.",
226
+ -7: "Length of private work WORKL array is not sufficient.",
227
+ -8: ("Error return from trid. eigenvalue calculation; "
228
+ "Information error from LAPACK routine dsteqr."),
229
+ -9: "Starting vector is zero.",
230
+ -10: "IPARAM(7) must be 1,2,3,4,5.",
231
+ -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
232
+ -12: "NEV and WHICH = 'BE' are incompatible.",
233
+ -14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
234
+ -15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
235
+ -16: "HOWMNY = 'S' not yet implemented",
236
+ -17: ("DSEUPD got a different count of the number of converged "
237
+ "Ritz values than DSAUPD got. This indicates the user "
238
+ "probably made an error in passing data from DSAUPD to "
239
+ "DSEUPD or that the data was modified before entering "
240
+ "DSEUPD.")
241
+ }
242
+
243
+ SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
244
+ SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
245
+ "to sufficient accuracy.")
246
+ SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
247
+ "converged "
248
+ "Ritz values than SSAUPD got. This indicates the user "
249
+ "probably made an error in passing data from SSAUPD to "
250
+ "SSEUPD or that the data was modified before entering "
251
+ "SSEUPD.")
252
+
253
+ _SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
254
+ 's': SSAUPD_ERRORS}
255
+ _NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
256
+ 's': SNAUPD_ERRORS,
257
+ 'z': ZNAUPD_ERRORS,
258
+ 'c': CNAUPD_ERRORS}
259
+ _SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
260
+ 's': SSEUPD_ERRORS}
261
+ _NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
262
+ 's': SNEUPD_ERRORS,
263
+ 'z': ZNEUPD_ERRORS,
264
+ 'c': CNEUPD_ERRORS}
265
+
266
+ # accepted values of parameter WHICH in _SEUPD
267
+ _SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
268
+
269
+ # accepted values of parameter WHICH in _NAUPD
270
+ _NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
271
+
272
+
273
+ class ArpackError(RuntimeError):
274
+ """
275
+ ARPACK error
276
+ """
277
+
278
+ def __init__(self, info, infodict=_NAUPD_ERRORS):
279
+ msg = infodict.get(info, "Unknown error")
280
+ RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
281
+
282
+
283
+ class ArpackNoConvergence(ArpackError):
284
+ """
285
+ ARPACK iteration did not converge
286
+
287
+ Attributes
288
+ ----------
289
+ eigenvalues : ndarray
290
+ Partial result. Converged eigenvalues.
291
+ eigenvectors : ndarray
292
+ Partial result. Converged eigenvectors.
293
+
294
+ """
295
+
296
+ def __init__(self, msg, eigenvalues, eigenvectors):
297
+ ArpackError.__init__(self, -1, {-1: msg})
298
+ self.eigenvalues = eigenvalues
299
+ self.eigenvectors = eigenvectors
300
+
301
+
302
+ def choose_ncv(k):
303
+ """
304
+ Choose number of lanczos vectors based on target number
305
+ of singular/eigen values and vectors to compute, k.
306
+ """
307
+ return max(2 * k + 1, 20)
308
+
309
+
310
+ class _ArpackParams:
311
+ def __init__(self, n, k, tp, mode=1, sigma=None,
312
+ ncv=None, v0=None, maxiter=None, which="LM", tol=0):
313
+ if k <= 0:
314
+ raise ValueError("k must be positive, k=%d" % k)
315
+
316
+ if maxiter is None:
317
+ maxiter = n * 10
318
+ if maxiter <= 0:
319
+ raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
320
+
321
+ if tp not in 'fdFD':
322
+ raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
323
+
324
+ if v0 is not None:
325
+ # ARPACK overwrites its initial resid, make a copy
326
+ self.resid = np.array(v0, copy=True)
327
+ info = 1
328
+ else:
329
+ # ARPACK will use a random initial vector.
330
+ self.resid = np.zeros(n, tp)
331
+ info = 0
332
+
333
+ if sigma is None:
334
+ #sigma not used
335
+ self.sigma = 0
336
+ else:
337
+ self.sigma = sigma
338
+
339
+ if ncv is None:
340
+ ncv = choose_ncv(k)
341
+ ncv = min(ncv, n)
342
+
343
+ self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
344
+ self.iparam = np.zeros(11, arpack_int)
345
+
346
+ # set solver mode and parameters
347
+ ishfts = 1
348
+ self.mode = mode
349
+ self.iparam[0] = ishfts
350
+ self.iparam[2] = maxiter
351
+ self.iparam[3] = 1
352
+ self.iparam[6] = mode
353
+
354
+ self.n = n
355
+ self.tol = tol
356
+ self.k = k
357
+ self.maxiter = maxiter
358
+ self.ncv = ncv
359
+ self.which = which
360
+ self.tp = tp
361
+ self.info = info
362
+
363
+ self.converged = False
364
+ self.ido = 0
365
+
366
+ def _raise_no_convergence(self):
367
+ msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
368
+ k_ok = self.iparam[4]
369
+ num_iter = self.iparam[2]
370
+ try:
371
+ ev, vec = self.extract(True)
372
+ except ArpackError as err:
373
+ msg = f"{msg} [{err}]"
374
+ ev = np.zeros((0,))
375
+ vec = np.zeros((self.n, 0))
376
+ k_ok = 0
377
+ raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
378
+
379
+
380
+ class _SymmetricArpackParams(_ArpackParams):
381
+ def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
382
+ Minv_matvec=None, sigma=None,
383
+ ncv=None, v0=None, maxiter=None, which="LM", tol=0):
384
+ # The following modes are supported:
385
+ # mode = 1:
386
+ # Solve the standard eigenvalue problem:
387
+ # A*x = lambda*x :
388
+ # A - symmetric
389
+ # Arguments should be
390
+ # matvec = left multiplication by A
391
+ # M_matvec = None [not used]
392
+ # Minv_matvec = None [not used]
393
+ #
394
+ # mode = 2:
395
+ # Solve the general eigenvalue problem:
396
+ # A*x = lambda*M*x
397
+ # A - symmetric
398
+ # M - symmetric positive definite
399
+ # Arguments should be
400
+ # matvec = left multiplication by A
401
+ # M_matvec = left multiplication by M
402
+ # Minv_matvec = left multiplication by M^-1
403
+ #
404
+ # mode = 3:
405
+ # Solve the general eigenvalue problem in shift-invert mode:
406
+ # A*x = lambda*M*x
407
+ # A - symmetric
408
+ # M - symmetric positive semi-definite
409
+ # Arguments should be
410
+ # matvec = None [not used]
411
+ # M_matvec = left multiplication by M
412
+ # or None, if M is the identity
413
+ # Minv_matvec = left multiplication by [A-sigma*M]^-1
414
+ #
415
+ # mode = 4:
416
+ # Solve the general eigenvalue problem in Buckling mode:
417
+ # A*x = lambda*AG*x
418
+ # A - symmetric positive semi-definite
419
+ # AG - symmetric indefinite
420
+ # Arguments should be
421
+ # matvec = left multiplication by A
422
+ # M_matvec = None [not used]
423
+ # Minv_matvec = left multiplication by [A-sigma*AG]^-1
424
+ #
425
+ # mode = 5:
426
+ # Solve the general eigenvalue problem in Cayley-transformed mode:
427
+ # A*x = lambda*M*x
428
+ # A - symmetric
429
+ # M - symmetric positive semi-definite
430
+ # Arguments should be
431
+ # matvec = left multiplication by A
432
+ # M_matvec = left multiplication by M
433
+ # or None, if M is the identity
434
+ # Minv_matvec = left multiplication by [A-sigma*M]^-1
435
+ if mode == 1:
436
+ if matvec is None:
437
+ raise ValueError("matvec must be specified for mode=1")
438
+ if M_matvec is not None:
439
+ raise ValueError("M_matvec cannot be specified for mode=1")
440
+ if Minv_matvec is not None:
441
+ raise ValueError("Minv_matvec cannot be specified for mode=1")
442
+
443
+ self.OP = matvec
444
+ self.B = lambda x: x
445
+ self.bmat = 'I'
446
+ elif mode == 2:
447
+ if matvec is None:
448
+ raise ValueError("matvec must be specified for mode=2")
449
+ if M_matvec is None:
450
+ raise ValueError("M_matvec must be specified for mode=2")
451
+ if Minv_matvec is None:
452
+ raise ValueError("Minv_matvec must be specified for mode=2")
453
+
454
+ self.OP = lambda x: Minv_matvec(matvec(x))
455
+ self.OPa = Minv_matvec
456
+ self.OPb = matvec
457
+ self.B = M_matvec
458
+ self.bmat = 'G'
459
+ elif mode == 3:
460
+ if matvec is not None:
461
+ raise ValueError("matvec must not be specified for mode=3")
462
+ if Minv_matvec is None:
463
+ raise ValueError("Minv_matvec must be specified for mode=3")
464
+
465
+ if M_matvec is None:
466
+ self.OP = Minv_matvec
467
+ self.OPa = Minv_matvec
468
+ self.B = lambda x: x
469
+ self.bmat = 'I'
470
+ else:
471
+ self.OP = lambda x: Minv_matvec(M_matvec(x))
472
+ self.OPa = Minv_matvec
473
+ self.B = M_matvec
474
+ self.bmat = 'G'
475
+ elif mode == 4:
476
+ if matvec is None:
477
+ raise ValueError("matvec must be specified for mode=4")
478
+ if M_matvec is not None:
479
+ raise ValueError("M_matvec must not be specified for mode=4")
480
+ if Minv_matvec is None:
481
+ raise ValueError("Minv_matvec must be specified for mode=4")
482
+ self.OPa = Minv_matvec
483
+ self.OP = lambda x: self.OPa(matvec(x))
484
+ self.B = matvec
485
+ self.bmat = 'G'
486
+ elif mode == 5:
487
+ if matvec is None:
488
+ raise ValueError("matvec must be specified for mode=5")
489
+ if Minv_matvec is None:
490
+ raise ValueError("Minv_matvec must be specified for mode=5")
491
+
492
+ self.OPa = Minv_matvec
493
+ self.A_matvec = matvec
494
+
495
+ if M_matvec is None:
496
+ self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
497
+ self.B = lambda x: x
498
+ self.bmat = 'I'
499
+ else:
500
+ self.OP = lambda x: Minv_matvec(matvec(x)
501
+ + sigma * M_matvec(x))
502
+ self.B = M_matvec
503
+ self.bmat = 'G'
504
+ else:
505
+ raise ValueError("mode=%i not implemented" % mode)
506
+
507
+ if which not in _SEUPD_WHICH:
508
+ raise ValueError("which must be one of %s"
509
+ % ' '.join(_SEUPD_WHICH))
510
+ if k >= n:
511
+ raise ValueError("k must be less than ndim(A), k=%d" % k)
512
+
513
+ _ArpackParams.__init__(self, n, k, tp, mode, sigma,
514
+ ncv, v0, maxiter, which, tol)
515
+
516
+ if self.ncv > n or self.ncv <= k:
517
+ raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
518
+
519
+ # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
520
+ self.workd = _aligned_zeros(3 * n, self.tp)
521
+ self.workl = _aligned_zeros(self.ncv * (self.ncv + 8), self.tp)
522
+
523
+ ltr = _type_conv[self.tp]
524
+ if ltr not in ["s", "d"]:
525
+ raise ValueError("Input matrix is not real-valued.")
526
+
527
+ self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
528
+ self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
529
+
530
+ self.iterate_infodict = _SAUPD_ERRORS[ltr]
531
+ self.extract_infodict = _SEUPD_ERRORS[ltr]
532
+
533
+ self.ipntr = np.zeros(11, arpack_int)
534
+
535
+ def iterate(self):
536
+ self.ido, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.info = \
537
+ self._arpack_solver(self.ido, self.bmat, self.which, self.k,
538
+ self.tol, self.resid, self.v, self.iparam,
539
+ self.ipntr, self.workd, self.workl, self.info)
540
+
541
+ xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
542
+ yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
543
+ if self.ido == -1:
544
+ # initialization
545
+ self.workd[yslice] = self.OP(self.workd[xslice])
546
+ elif self.ido == 1:
547
+ # compute y = Op*x
548
+ if self.mode == 1:
549
+ self.workd[yslice] = self.OP(self.workd[xslice])
550
+ elif self.mode == 2:
551
+ self.workd[xslice] = self.OPb(self.workd[xslice])
552
+ self.workd[yslice] = self.OPa(self.workd[xslice])
553
+ elif self.mode == 5:
554
+ Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
555
+ Ax = self.A_matvec(self.workd[xslice])
556
+ self.workd[yslice] = self.OPa(Ax + (self.sigma *
557
+ self.workd[Bxslice]))
558
+ else:
559
+ Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
560
+ self.workd[yslice] = self.OPa(self.workd[Bxslice])
561
+ elif self.ido == 2:
562
+ self.workd[yslice] = self.B(self.workd[xslice])
563
+ elif self.ido == 3:
564
+ raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
565
+ else:
566
+ self.converged = True
567
+
568
+ if self.info == 0:
569
+ pass
570
+ elif self.info == 1:
571
+ self._raise_no_convergence()
572
+ else:
573
+ raise ArpackError(self.info, infodict=self.iterate_infodict)
574
+
575
+ def extract(self, return_eigenvectors):
576
+ rvec = return_eigenvectors
577
+ ierr = 0
578
+ howmny = 'A' # return all eigenvectors
579
+ sselect = np.zeros(self.ncv, 'int') # unused
580
+ d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
581
+ self.bmat, self.which, self.k,
582
+ self.tol, self.resid, self.v,
583
+ self.iparam[0:7], self.ipntr,
584
+ self.workd[0:2 * self.n],
585
+ self.workl, ierr)
586
+ if ierr != 0:
587
+ raise ArpackError(ierr, infodict=self.extract_infodict)
588
+ k_ok = self.iparam[4]
589
+ d = d[:k_ok]
590
+ z = z[:, :k_ok]
591
+
592
+ if return_eigenvectors:
593
+ return d, z
594
+ else:
595
+ return d
596
+
597
+
598
+ class _UnsymmetricArpackParams(_ArpackParams):
599
+ def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
600
+ Minv_matvec=None, sigma=None,
601
+ ncv=None, v0=None, maxiter=None, which="LM", tol=0):
602
+ # The following modes are supported:
603
+ # mode = 1:
604
+ # Solve the standard eigenvalue problem:
605
+ # A*x = lambda*x
606
+ # A - square matrix
607
+ # Arguments should be
608
+ # matvec = left multiplication by A
609
+ # M_matvec = None [not used]
610
+ # Minv_matvec = None [not used]
611
+ #
612
+ # mode = 2:
613
+ # Solve the generalized eigenvalue problem:
614
+ # A*x = lambda*M*x
615
+ # A - square matrix
616
+ # M - symmetric, positive semi-definite
617
+ # Arguments should be
618
+ # matvec = left multiplication by A
619
+ # M_matvec = left multiplication by M
620
+ # Minv_matvec = left multiplication by M^-1
621
+ #
622
+ # mode = 3,4:
623
+ # Solve the general eigenvalue problem in shift-invert mode:
624
+ # A*x = lambda*M*x
625
+ # A - square matrix
626
+ # M - symmetric, positive semi-definite
627
+ # Arguments should be
628
+ # matvec = None [not used]
629
+ # M_matvec = left multiplication by M
630
+ # or None, if M is the identity
631
+ # Minv_matvec = left multiplication by [A-sigma*M]^-1
632
+ # if A is real and mode==3, use the real part of Minv_matvec
633
+ # if A is real and mode==4, use the imag part of Minv_matvec
634
+ # if A is complex and mode==3,
635
+ # use real and imag parts of Minv_matvec
636
+ if mode == 1:
637
+ if matvec is None:
638
+ raise ValueError("matvec must be specified for mode=1")
639
+ if M_matvec is not None:
640
+ raise ValueError("M_matvec cannot be specified for mode=1")
641
+ if Minv_matvec is not None:
642
+ raise ValueError("Minv_matvec cannot be specified for mode=1")
643
+
644
+ self.OP = matvec
645
+ self.B = lambda x: x
646
+ self.bmat = 'I'
647
+ elif mode == 2:
648
+ if matvec is None:
649
+ raise ValueError("matvec must be specified for mode=2")
650
+ if M_matvec is None:
651
+ raise ValueError("M_matvec must be specified for mode=2")
652
+ if Minv_matvec is None:
653
+ raise ValueError("Minv_matvec must be specified for mode=2")
654
+
655
+ self.OP = lambda x: Minv_matvec(matvec(x))
656
+ self.OPa = Minv_matvec
657
+ self.OPb = matvec
658
+ self.B = M_matvec
659
+ self.bmat = 'G'
660
+ elif mode in (3, 4):
661
+ if matvec is None:
662
+ raise ValueError("matvec must be specified "
663
+ "for mode in (3,4)")
664
+ if Minv_matvec is None:
665
+ raise ValueError("Minv_matvec must be specified "
666
+ "for mode in (3,4)")
667
+
668
+ self.matvec = matvec
669
+ if tp in 'DF': # complex type
670
+ if mode == 3:
671
+ self.OPa = Minv_matvec
672
+ else:
673
+ raise ValueError("mode=4 invalid for complex A")
674
+ else: # real type
675
+ if mode == 3:
676
+ self.OPa = lambda x: np.real(Minv_matvec(x))
677
+ else:
678
+ self.OPa = lambda x: np.imag(Minv_matvec(x))
679
+ if M_matvec is None:
680
+ self.B = lambda x: x
681
+ self.bmat = 'I'
682
+ self.OP = self.OPa
683
+ else:
684
+ self.B = M_matvec
685
+ self.bmat = 'G'
686
+ self.OP = lambda x: self.OPa(M_matvec(x))
687
+ else:
688
+ raise ValueError("mode=%i not implemented" % mode)
689
+
690
+ if which not in _NEUPD_WHICH:
691
+ raise ValueError("Parameter which must be one of %s"
692
+ % ' '.join(_NEUPD_WHICH))
693
+ if k >= n - 1:
694
+ raise ValueError("k must be less than ndim(A)-1, k=%d" % k)
695
+
696
+ _ArpackParams.__init__(self, n, k, tp, mode, sigma,
697
+ ncv, v0, maxiter, which, tol)
698
+
699
+ if self.ncv > n or self.ncv <= k + 1:
700
+ raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
701
+
702
+ # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
703
+ self.workd = _aligned_zeros(3 * n, self.tp)
704
+ self.workl = _aligned_zeros(3 * self.ncv * (self.ncv + 2), self.tp)
705
+
706
+ ltr = _type_conv[self.tp]
707
+ self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
708
+ self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
709
+
710
+ self.iterate_infodict = _NAUPD_ERRORS[ltr]
711
+ self.extract_infodict = _NEUPD_ERRORS[ltr]
712
+
713
+ self.ipntr = np.zeros(14, arpack_int)
714
+
715
+ if self.tp in 'FD':
716
+ # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
717
+ self.rwork = _aligned_zeros(self.ncv, self.tp.lower())
718
+ else:
719
+ self.rwork = None
720
+
721
+ def iterate(self):
722
+ if self.tp in 'fd':
723
+ results = self._arpack_solver(self.ido, self.bmat, self.which, self.k,
724
+ self.tol, self.resid, self.v, self.iparam,
725
+ self.ipntr, self.workd, self.workl, self.info)
726
+ self.ido, self.tol, self.resid, self.v, \
727
+ self.iparam, self.ipntr, self.info = results
728
+
729
+ else:
730
+ results = self._arpack_solver(self.ido, self.bmat, self.which, self.k,
731
+ self.tol, self.resid, self.v, self.iparam,
732
+ self.ipntr, self.workd, self.workl,
733
+ self.rwork, self.info)
734
+ self.ido, self.tol, self.resid, self.v, \
735
+ self.iparam, self.ipntr, self.info = results
736
+
737
+
738
+ xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
739
+ yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
740
+ if self.ido == -1:
741
+ # initialization
742
+ self.workd[yslice] = self.OP(self.workd[xslice])
743
+ elif self.ido == 1:
744
+ # compute y = Op*x
745
+ if self.mode in (1, 2):
746
+ self.workd[yslice] = self.OP(self.workd[xslice])
747
+ else:
748
+ Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
749
+ self.workd[yslice] = self.OPa(self.workd[Bxslice])
750
+ elif self.ido == 2:
751
+ self.workd[yslice] = self.B(self.workd[xslice])
752
+ elif self.ido == 3:
753
+ raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
754
+ else:
755
+ self.converged = True
756
+
757
+ if self.info == 0:
758
+ pass
759
+ elif self.info == 1:
760
+ self._raise_no_convergence()
761
+ else:
762
+ raise ArpackError(self.info, infodict=self.iterate_infodict)
763
+
764
+ def extract(self, return_eigenvectors):
765
+ k, n = self.k, self.n
766
+
767
+ ierr = 0
768
+ howmny = 'A' # return all eigenvectors
769
+ sselect = np.zeros(self.ncv, 'int') # unused
770
+ sigmar = np.real(self.sigma)
771
+ sigmai = np.imag(self.sigma)
772
+ workev = np.zeros(3 * self.ncv, self.tp)
773
+
774
+ if self.tp in 'fd':
775
+ dr = np.zeros(k + 1, self.tp)
776
+ di = np.zeros(k + 1, self.tp)
777
+ zr = np.zeros((n, k + 1), self.tp)
778
+ dr, di, zr, ierr = \
779
+ self._arpack_extract(return_eigenvectors,
780
+ howmny, sselect, sigmar, sigmai, workev,
781
+ self.bmat, self.which, k, self.tol, self.resid,
782
+ self.v, self.iparam, self.ipntr,
783
+ self.workd, self.workl, self.info)
784
+ if ierr != 0:
785
+ raise ArpackError(ierr, infodict=self.extract_infodict)
786
+ nreturned = self.iparam[4] # number of good eigenvalues returned
787
+
788
+ # Build complex eigenvalues from real and imaginary parts
789
+ d = dr + 1.0j * di
790
+
791
+ # Arrange the eigenvectors: complex eigenvectors are stored as
792
+ # real,imaginary in consecutive columns
793
+ z = zr.astype(self.tp.upper())
794
+
795
+ # The ARPACK nonsymmetric real and double interface (s,d)naupd
796
+ # return eigenvalues and eigenvectors in real (float,double)
797
+ # arrays.
798
+
799
+ # Efficiency: this should check that return_eigenvectors == True
800
+ # before going through this construction.
801
+ if sigmai == 0:
802
+ i = 0
803
+ while i <= k:
804
+ # check if complex
805
+ if abs(d[i].imag) != 0:
806
+ # this is a complex conjugate pair with eigenvalues
807
+ # in consecutive columns
808
+ if i < k:
809
+ z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
810
+ z[:, i + 1] = z[:, i].conjugate()
811
+ i += 1
812
+ else:
813
+ #last eigenvalue is complex: the imaginary part of
814
+ # the eigenvector has not been returned
815
+ #this can only happen if nreturned > k, so we'll
816
+ # throw out this case.
817
+ nreturned -= 1
818
+ i += 1
819
+
820
+ else:
821
+ # real matrix, mode 3 or 4, imag(sigma) is nonzero:
822
+ # see remark 3 in <s,d>neupd.f
823
+ # Build complex eigenvalues from real and imaginary parts
824
+ i = 0
825
+ while i <= k:
826
+ if abs(d[i].imag) == 0:
827
+ d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
828
+ else:
829
+ if i < k:
830
+ z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
831
+ z[:, i + 1] = z[:, i].conjugate()
832
+ d[i] = ((np.dot(zr[:, i],
833
+ self.matvec(zr[:, i]))
834
+ + np.dot(zr[:, i + 1],
835
+ self.matvec(zr[:, i + 1])))
836
+ + 1j * (np.dot(zr[:, i],
837
+ self.matvec(zr[:, i + 1]))
838
+ - np.dot(zr[:, i + 1],
839
+ self.matvec(zr[:, i]))))
840
+ d[i + 1] = d[i].conj()
841
+ i += 1
842
+ else:
843
+ #last eigenvalue is complex: the imaginary part of
844
+ # the eigenvector has not been returned
845
+ #this can only happen if nreturned > k, so we'll
846
+ # throw out this case.
847
+ nreturned -= 1
848
+ i += 1
849
+
850
+ # Now we have k+1 possible eigenvalues and eigenvectors
851
+ # Return the ones specified by the keyword "which"
852
+
853
+ if nreturned <= k:
854
+ # we got less or equal as many eigenvalues we wanted
855
+ d = d[:nreturned]
856
+ z = z[:, :nreturned]
857
+ else:
858
+ # we got one extra eigenvalue (likely a cc pair, but which?)
859
+ if self.mode in (1, 2):
860
+ rd = d
861
+ elif self.mode in (3, 4):
862
+ rd = 1 / (d - self.sigma)
863
+
864
+ if self.which in ['LR', 'SR']:
865
+ ind = np.argsort(rd.real)
866
+ elif self.which in ['LI', 'SI']:
867
+ # for LI,SI ARPACK returns largest,smallest
868
+ # abs(imaginary) (complex pairs come together)
869
+ ind = np.argsort(abs(rd.imag))
870
+ else:
871
+ ind = np.argsort(abs(rd))
872
+
873
+ if self.which in ['LR', 'LM', 'LI']:
874
+ ind = ind[-k:][::-1]
875
+ elif self.which in ['SR', 'SM', 'SI']:
876
+ ind = ind[:k]
877
+
878
+ d = d[ind]
879
+ z = z[:, ind]
880
+ else:
881
+ # complex is so much simpler...
882
+ d, z, ierr =\
883
+ self._arpack_extract(return_eigenvectors,
884
+ howmny, sselect, self.sigma, workev,
885
+ self.bmat, self.which, k, self.tol, self.resid,
886
+ self.v, self.iparam, self.ipntr,
887
+ self.workd, self.workl, self.rwork, ierr)
888
+
889
+ if ierr != 0:
890
+ raise ArpackError(ierr, infodict=self.extract_infodict)
891
+
892
+ k_ok = self.iparam[4]
893
+ d = d[:k_ok]
894
+ z = z[:, :k_ok]
895
+
896
+ if return_eigenvectors:
897
+ return d, z
898
+ else:
899
+ return d
900
+
901
+
902
+ def _aslinearoperator_with_dtype(m):
903
+ m = aslinearoperator(m)
904
+ if not hasattr(m, 'dtype'):
905
+ x = np.zeros(m.shape[1])
906
+ m.dtype = (m * x).dtype
907
+ return m
908
+
909
+
910
+ class SpLuInv(LinearOperator):
911
+ """
912
+ SpLuInv:
913
+ helper class to repeatedly solve M*x=b
914
+ using a sparse LU-decomposition of M
915
+ """
916
+
917
+ def __init__(self, M):
918
+ self.M_lu = splu(M)
919
+ self.shape = M.shape
920
+ self.dtype = M.dtype
921
+ self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
922
+
923
+ def _matvec(self, x):
924
+ # careful here: splu.solve will throw away imaginary
925
+ # part of x if M is real
926
+ x = np.asarray(x)
927
+ if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
928
+ return (self.M_lu.solve(np.real(x).astype(self.dtype))
929
+ + 1j * self.M_lu.solve(np.imag(x).astype(self.dtype)))
930
+ else:
931
+ return self.M_lu.solve(x.astype(self.dtype))
932
+
933
+
934
class LuInv(LinearOperator):
    """Repeatedly solve ``M @ x = b`` via a dense LU factorization of M.

    ``M`` is factored once at construction time, so every subsequent
    ``matvec`` costs only two triangular solves.
    """

    def __init__(self, M):
        # Factor eagerly; the pivoted factors are reused by every solve.
        self.M_lu = lu_factor(M)
        self.shape = M.shape
        self.dtype = M.dtype

    def _matvec(self, x):
        return lu_solve(self.M_lu, x)
948
+
949
+
950
+ def gmres_loose(A, b, tol):
951
+ """
952
+ gmres with looser termination condition.
953
+ """
954
+ b = np.asarray(b)
955
+ min_tol = 1000 * np.sqrt(b.size) * np.finfo(b.dtype).eps
956
+ return gmres(A, b, rtol=max(tol, min_tol), atol=0)
957
+
958
+
959
class IterInv(LinearOperator):
    """Repeatedly solve ``M @ x = b`` with an iterative solver.

    Used when ``M`` is a general linear operator for which no direct
    factorization is available.
    """

    def __init__(self, M, ifunc=gmres_loose, tol=0):
        self.M = M
        try:
            self.dtype = M.dtype
        except AttributeError:
            # Operators without a dtype: probe with a zero vector.
            probe = np.zeros(M.shape[1])
            self.dtype = (M * probe).dtype
        self.shape = M.shape

        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function. We should match this
            tol = 2 * np.finfo(self.dtype).eps
        self.ifunc = ifunc
        self.tol = tol

    def _matvec(self, x):
        sol, info = self.ifunc(self.M, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting M: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return sol
989
+
990
+
991
class IterOpInv(LinearOperator):
    """Repeatedly solve ``[A - sigma*M] @ x = b`` with an iterative solver.

    ``M=None`` is treated as the identity operator.
    """

    def __init__(self, A, M, sigma, ifunc=gmres_loose, tol=0):
        self.A = A
        self.M = M
        self.sigma = sigma

        # Build the shifted operator A - sigma*M (M=None means identity).
        if M is None:
            def shifted(x):
                return A.matvec(x) - sigma * x
        else:
            def shifted(x):
                return A.matvec(x) - sigma * M.matvec(x)

        probe = np.zeros(A.shape[1])
        self.OP = LinearOperator(self.A.shape, shifted,
                                 dtype=shifted(probe).dtype)
        self.shape = A.shape

        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function. We should match this
            tol = 2 * np.finfo(self.OP.dtype).eps
        self.ifunc = ifunc
        self.tol = tol

    def _matvec(self, x):
        sol, info = self.ifunc(self.OP, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting [A-sigma*M]: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return sol

    @property
    def dtype(self):
        return self.OP.dtype
1040
+
1041
+
1042
+ def _fast_spmatrix_to_csc(A, hermitian=False):
1043
+ """Convert sparse matrix to CSC (by transposing, if possible)"""
1044
+ if (A.format == "csr" and hermitian
1045
+ and not np.issubdtype(A.dtype, np.complexfloating)):
1046
+ return A.T
1047
+ elif is_pydata_spmatrix(A):
1048
+ # No need to convert
1049
+ return A
1050
+ else:
1051
+ return A.tocsc()
1052
+
1053
+
1054
def get_inv_matvec(M, hermitian=False, tol=0):
    """Return a callable that applies ``M^-1`` to a vector.

    Dense matrices get a dense LU, sparse matrices a sparse LU, and any
    other operator an iterative solver.
    """
    if isdense(M):
        return LuInv(M).matvec
    if not (issparse(M) or is_pydata_spmatrix(M)):
        return IterInv(M, tol=tol).matvec
    return SpLuInv(_fast_spmatrix_to_csc(M, hermitian=hermitian)).matvec
1062
+
1063
+
1064
def get_OPinv_matvec(A, M, sigma, hermitian=False, tol=0):
    """Return a callable that applies ``[A - sigma*M]^-1`` to a vector.

    Parameters
    ----------
    A, M : matrix, sparse matrix, or LinearOperator
        Operator pair defining the shifted system; ``M=None`` means the
        identity.
    sigma : real or complex
        The spectral shift.
    hermitian : bool, optional
        If True, a real Hermitian CSR operand can be converted to CSC by
        transposition (no copy).
    tol : float, optional
        Tolerance forwarded to the iterative fallback solver.
    """
    if sigma == 0:
        # No shift: the plain inverse of A suffices.
        return get_inv_matvec(A, hermitian=hermitian, tol=tol)

    if M is None:
        #M is the identity matrix
        if isdense(A):
            if (np.issubdtype(A.dtype, np.complexfloating)
                    or np.imag(sigma) == 0):
                A = np.copy(A)
            else:
                # real A with complex sigma: promote so the shift fits
                A = A + 0j
            # subtract sigma from the diagonal in place
            A.flat[::A.shape[1] + 1] -= sigma
            return LuInv(A).matvec
        elif issparse(A) or is_pydata_spmatrix(A):
            A = A - sigma * eye(A.shape[0])
            A = _fast_spmatrix_to_csc(A, hermitian=hermitian)
            return SpLuInv(A).matvec
        else:
            return IterOpInv(_aslinearoperator_with_dtype(A),
                             M, sigma, tol=tol).matvec
    else:
        # BUG FIX: the second clause previously tested is_pydata_spmatrix(A)
        # instead of is_pydata_spmatrix(M), so a pydata-sparse M paired with
        # a non-pydata A was misrouted to the iterative solver.
        if ((not isdense(A) and not issparse(A) and not is_pydata_spmatrix(A)) or
                (not isdense(M) and not issparse(M) and not is_pydata_spmatrix(M))):
            return IterOpInv(_aslinearoperator_with_dtype(A),
                             _aslinearoperator_with_dtype(M),
                             sigma, tol=tol).matvec
        elif isdense(A) or isdense(M):
            return LuInv(A - sigma * M).matvec
        else:
            OP = A - sigma * M
            OP = _fast_spmatrix_to_csc(OP, hermitian=hermitian)
            return SpLuInv(OP).matvec
1097
+
1098
+
1099
+ # ARPACK is not threadsafe or reentrant (SAVE variables), so we need a
1100
+ # lock and a re-entering check.
1101
+ _ARPACK_LOCK = ReentrancyLock("Nested calls to eigs/eighs not allowed: "
1102
+ "ARPACK is not re-entrant")
1103
+
1104
+
1105
def eigs(A, k=6, M=None, sigma=None, which='LM', v0=None,
         ncv=None, maxiter=None, tol=0, return_eigenvectors=True,
         Minv=None, OPinv=None, OPpart=None):
    """
    Find k eigenvalues and eigenvectors of the square matrix A.

    Solves ``A @ x[i] = w[i] * x[i]``, the standard eigenvalue problem
    for w[i] eigenvalues with corresponding eigenvectors x[i].

    If M is specified, solves ``A @ x[i] = w[i] * M @ x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i]

    Parameters
    ----------
    A : ndarray, sparse matrix or LinearOperator
        An array, sparse matrix, or LinearOperator representing
        the operation ``A @ x``, where A is a real or complex square matrix.
    k : int, optional
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N-1. It is not possible to compute all
        eigenvectors of a matrix.
    M : ndarray, sparse matrix or LinearOperator, optional
        An array, sparse matrix, or LinearOperator representing
        the operation M@x for the generalized eigenvalue problem

            A @ x = w * M @ x.

        M must represent a real symmetric matrix if A is real, and must
        represent a complex Hermitian matrix if A is complex. For best
        results, the data type of M should be the same as that of A.
        Additionally:

            If `sigma` is None, M is positive definite

            If sigma is specified, M is positive semi-definite

        If sigma is None, eigs requires an operator to compute the solution
        of the linear equation ``M @ x = b``. This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator. Alternatively,
        the user can supply the matrix or operator Minv, which gives
        ``x = Minv @ b = M^-1 @ b``.
    sigma : real or complex, optional
        Find eigenvalues near sigma using shift-invert mode. This requires
        an operator to compute the solution of the linear system
        ``[A - sigma * M] @ x = b``, where M is the identity matrix if
        unspecified. This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator OPinv,
        which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``.
        For a real matrix A, shift-invert can either be done in imaginary
        mode or real mode, specified by the parameter OPpart ('r' or 'i').
        Note that when sigma is specified, the keyword 'which' (below)
        refers to the shifted eigenvalues ``w'[i]`` where:

            If A is real and OPpart == 'r' (default),
              ``w'[i] = 1/2 * [1/(w[i]-sigma) + 1/(w[i]-conj(sigma))]``.

            If A is real and OPpart == 'i',
              ``w'[i] = 1/2i * [1/(w[i]-sigma) - 1/(w[i]-conj(sigma))]``.

            If A is complex, ``w'[i] = 1/(w[i]-sigma)``.

    v0 : ndarray, optional
        Starting vector for iteration.
        Default: random
    ncv : int, optional
        The number of Lanczos vectors generated
        `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
        Default: ``min(n, max(2*k + 1, 20))``
    which : str, ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'], optional
        Which `k` eigenvectors and eigenvalues to find:

            'LM' : largest magnitude

            'SM' : smallest magnitude

            'LR' : largest real part

            'SR' : smallest real part

            'LI' : largest imaginary part

            'SI' : smallest imaginary part

        When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
        (see discussion in 'sigma', above).  ARPACK is generally better
        at finding large values than small values.  If small eigenvalues are
        desired, consider using shift-invert mode for better performance.
    maxiter : int, optional
        Maximum number of Arnoldi update iterations allowed
        Default: ``n*10``
    tol : float, optional
        Relative accuracy for eigenvalues (stopping criterion)
        The default value of 0 implies machine precision.
    return_eigenvectors : bool, optional
        Return eigenvectors (True) in addition to eigenvalues
    Minv : ndarray, sparse matrix or LinearOperator, optional
        See notes in M, above.
    OPinv : ndarray, sparse matrix or LinearOperator, optional
        See notes in sigma, above.
    OPpart : {'r' or 'i'}, optional
        See notes in sigma, above

    Returns
    -------
    w : ndarray
        Array of k eigenvalues.
    v : ndarray
        An array of `k` eigenvectors.
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].

    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.
        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.

    See Also
    --------
    eigsh : eigenvalues and eigenvectors for symmetric matrix A
    svds : singular value decomposition for a matrix A

    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
    ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
    find the eigenvalues and eigenvectors [2]_.

    References
    ----------
    .. [1] ARPACK Software, https://github.com/opencollab/arpack-ng
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.

    Examples
    --------
    Find 6 eigenvectors of the identity matrix:

    >>> import numpy as np
    >>> from scipy.sparse.linalg import eigs
    >>> id = np.eye(13)
    >>> vals, vecs = eigs(id, k=6)
    >>> vals
    array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
    >>> vecs.shape
    (13, 6)

    """
    if A.shape[0] != A.shape[1]:
        raise ValueError(f'expected square matrix (shape={A.shape})')
    if M is not None:
        if M.shape != A.shape:
            raise ValueError(f'wrong M dimensions {M.shape}, should be {A.shape}')
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence',
                          stacklevel=2)

    n = A.shape[0]

    if k <= 0:
        raise ValueError("k=%d must be greater than 0." % k)

    if k >= n - 1:
        # ARPACK cannot compute (almost) all eigenpairs; fall back to the
        # dense solver for explicit matrices.
        warnings.warn("k >= N - 1 for N * N square matrix. "
                      "Attempting to use scipy.linalg.eig instead.",
                      RuntimeWarning, stacklevel=2)

        if issparse(A):
            raise TypeError("Cannot use scipy.linalg.eig for sparse A with "
                            "k >= N - 1. Use scipy.linalg.eig(A.toarray()) or"
                            " reduce k.")
        if isinstance(A, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eig for LinearOperator "
                            "A with k >= N - 1.")
        if isinstance(M, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eig for LinearOperator "
                            "M with k >= N - 1.")

        return eig(A, b=M, right=return_eigenvectors)

    if sigma is None:
        matvec = _aslinearoperator_with_dtype(A).matvec

        if OPinv is not None:
            raise ValueError("OPinv should not be specified "
                             "with sigma = None.")
        if OPpart is not None:
            raise ValueError("OPpart should not be specified with "
                             "sigma = None or complex A")

        if M is None:
            #standard eigenvalue problem
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be "
                                 "specified with M = None.")
        else:
            #general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, hermitian=True, tol=tol)
            else:
                Minv = _aslinearoperator_with_dtype(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = _aslinearoperator_with_dtype(M).matvec
    else:
        #sigma is not None: shift-invert mode
        if np.issubdtype(A.dtype, np.complexfloating):
            if OPpart is not None:
                # BUG FIX: this branch is only reached when sigma is not
                # None, so the old message ("with sigma=None or complex A")
                # was misleading; the actual reason is that A is complex.
                raise ValueError("OPpart should not be specified "
                                 "with complex A")
            mode = 3
        elif OPpart is None or OPpart.lower() == 'r':
            mode = 3
        elif OPpart.lower() == 'i':
            if np.imag(sigma) == 0:
                raise ValueError("OPpart cannot be 'i' if sigma is real")
            mode = 4
        else:
            raise ValueError("OPpart must be one of ('r','i')")

        matvec = _aslinearoperator_with_dtype(A).matvec
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is")
        if OPinv is None:
            Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                           hermitian=False, tol=tol)
        else:
            OPinv = _aslinearoperator_with_dtype(OPinv)
            Minv_matvec = OPinv.matvec
        if M is None:
            M_matvec = None
        else:
            M_matvec = _aslinearoperator_with_dtype(M).matvec

    params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                      M_matvec, Minv_matvec, sigma,
                                      ncv, v0, maxiter, which, tol)

    # ARPACK keeps state in Fortran SAVE variables, so only one problem
    # may be iterated at a time (see _ARPACK_LOCK above).
    with _ARPACK_LOCK:
        while not params.converged:
            params.iterate()

    return params.extract(return_eigenvectors)
1358
+
1359
+
1360
def eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None,
          ncv=None, maxiter=None, tol=0, return_eigenvectors=True,
          Minv=None, OPinv=None, mode='normal'):
    """
    Find k eigenvalues and eigenvectors of the real symmetric square matrix
    or complex Hermitian matrix A.

    Solves ``A @ x[i] = w[i] * x[i]``, the standard eigenvalue problem for
    w[i] eigenvalues with corresponding eigenvectors x[i].

    If M is specified, solves ``A @ x[i] = w[i] * M @ x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i].

    Note that there is no specialized routine for the case when A is a complex
    Hermitian matrix. In this case, ``eigsh()`` will call ``eigs()`` and return the
    real parts of the eigenvalues thus obtained.

    Parameters
    ----------
    A : ndarray, sparse matrix or LinearOperator
        A square operator representing the operation ``A @ x``, where ``A`` is
        real symmetric or complex Hermitian. For buckling mode (see below)
        ``A`` must additionally be positive-definite.
    k : int, optional
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N. It is not possible to compute all
        eigenvectors of a matrix.

    Returns
    -------
    w : array
        Array of k eigenvalues.
    v : array
        An array representing the `k` eigenvectors. The column ``v[:, i]`` is
        the eigenvector corresponding to the eigenvalue ``w[i]``.

    Other Parameters
    ----------------
    M : An N x N matrix, array, sparse matrix, or linear operator representing
        the operation ``M @ x`` for the generalized eigenvalue problem

            A @ x = w * M @ x.

        M must represent a real symmetric matrix if A is real, and must
        represent a complex Hermitian matrix if A is complex. For best
        results, the data type of M should be the same as that of A.
        Additionally:

            If sigma is None, M is symmetric positive definite.

            If sigma is specified, M is symmetric positive semi-definite.

            In buckling mode, M is symmetric indefinite.

        If sigma is None, eigsh requires an operator to compute the solution
        of the linear equation ``M @ x = b``. This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator. Alternatively,
        the user can supply the matrix or operator Minv, which gives
        ``x = Minv @ b = M^-1 @ b``.
    sigma : real
        Find eigenvalues near sigma using shift-invert mode. This requires
        an operator to compute the solution of the linear system
        ``[A - sigma * M] x = b``, where M is the identity matrix if
        unspecified. This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator OPinv,
        which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``.
        Note that when sigma is specified, the keyword 'which' refers to
        the shifted eigenvalues ``w'[i]`` where:

            if mode == 'normal', ``w'[i] = 1 / (w[i] - sigma)``.

            if mode == 'cayley', ``w'[i] = (w[i] + sigma) / (w[i] - sigma)``.

            if mode == 'buckling', ``w'[i] = w[i] / (w[i] - sigma)``.

        (see further discussion in 'mode' below)
    v0 : ndarray, optional
        Starting vector for iteration.
        Default: random
    ncv : int, optional
        The number of Lanczos vectors generated ncv must be greater than k and
        smaller than n; it is recommended that ``ncv > 2*k``.
        Default: ``min(n, max(2*k + 1, 20))``
    which : str ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
        If A is a complex Hermitian matrix, 'BE' is invalid.
        Which `k` eigenvectors and eigenvalues to find:

            'LM' : Largest (in magnitude) eigenvalues.

            'SM' : Smallest (in magnitude) eigenvalues.

            'LA' : Largest (algebraic) eigenvalues.

            'SA' : Smallest (algebraic) eigenvalues.

            'BE' : Half (k/2) from each end of the spectrum.

        When k is odd, return one more (k/2+1) from the high end.
        When sigma != None, 'which' refers to the shifted eigenvalues ``w'[i]``
        (see discussion in 'sigma', above).  ARPACK is generally better
        at finding large values than small values.  If small eigenvalues are
        desired, consider using shift-invert mode for better performance.
    maxiter : int, optional
        Maximum number of Arnoldi update iterations allowed.
        Default: ``n*10``
    tol : float
        Relative accuracy for eigenvalues (stopping criterion).
        The default value of 0 implies machine precision.
    Minv : N x N matrix, array, sparse matrix, or LinearOperator
        See notes in M, above.
    OPinv : N x N matrix, array, sparse matrix, or LinearOperator
        See notes in sigma, above.
    return_eigenvectors : bool
        Return eigenvectors (True) in addition to eigenvalues.
        This value determines the order in which eigenvalues are sorted.
        The sort order is also dependent on the `which` variable.

            For which = 'LM' or 'SA':
                If `return_eigenvectors` is True, eigenvalues are sorted by
                algebraic value.

                If `return_eigenvectors` is False, eigenvalues are sorted by
                absolute value.

            For which = 'BE' or 'LA':
                eigenvalues are always sorted by algebraic value.

            For which = 'SM':
                If `return_eigenvectors` is True, eigenvalues are sorted by
                algebraic value.

                If `return_eigenvectors` is False, eigenvalues are sorted by
                decreasing absolute value.

    mode : string ['normal' | 'buckling' | 'cayley']
        Specify strategy to use for shift-invert mode. This argument applies
        only for real-valued A and sigma != None. For shift-invert mode,
        ARPACK internally solves the eigenvalue problem
        ``OP @ x'[i] = w'[i] * B @ x'[i]``
        and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
        into the desired eigenvectors and eigenvalues of the problem
        ``A @ x[i] = w[i] * M @ x[i]``.
        The modes are as follows:

            'normal' :
                OP = [A - sigma * M]^-1 @ M,
                B = M,
                w'[i] = 1 / (w[i] - sigma)

            'buckling' :
                OP = [A - sigma * M]^-1 @ A,
                B = A,
                w'[i] = w[i] / (w[i] - sigma)

            'cayley' :
                OP = [A - sigma * M]^-1 @ [A + sigma * M],
                B = M,
                w'[i] = (w[i] + sigma) / (w[i] - sigma)

        The choice of mode will affect which eigenvalues are selected by
        the keyword 'which', and can also impact the stability of
        convergence (see [2] for a discussion).

    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.

        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.

    See Also
    --------
    eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
    svds : singular value decomposition for a matrix A

    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
    functions which use the Implicitly Restarted Lanczos Method to
    find the eigenvalues and eigenvectors [2]_.

    References
    ----------
    .. [1] ARPACK Software, https://github.com/opencollab/arpack-ng
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import eigsh
    >>> identity = np.eye(13)
    >>> eigenvalues, eigenvectors = eigsh(identity, k=6)
    >>> eigenvalues
    array([1., 1., 1., 1., 1., 1.])
    >>> eigenvectors.shape
    (13, 6)

    """
    # complex Hermitian matrices should be solved with eigs
    if np.issubdtype(A.dtype, np.complexfloating):
        if mode != 'normal':
            raise ValueError("mode=%s cannot be used with "
                             "complex matrix A" % mode)
        if which == 'BE':
            raise ValueError("which='BE' cannot be used with complex matrix A")
        elif which == 'LA':
            # translate the symmetric-only selectors to their
            # largest/smallest-real-part equivalents for eigs
            which = 'LR'
        elif which == 'SA':
            which = 'SR'
        ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
                   ncv=ncv, maxiter=maxiter, tol=tol,
                   return_eigenvectors=return_eigenvectors, Minv=Minv,
                   OPinv=OPinv)

        if return_eigenvectors:
            # Hermitian eigenvalues are real; drop the zero imaginary parts.
            return ret[0].real, ret[1]
        else:
            return ret.real

    if A.shape[0] != A.shape[1]:
        raise ValueError(f'expected square matrix (shape={A.shape})')
    if M is not None:
        if M.shape != A.shape:
            raise ValueError(f'wrong M dimensions {M.shape}, should be {A.shape}')
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence',
                          stacklevel=2)

    n = A.shape[0]

    if k <= 0:
        raise ValueError("k must be greater than 0.")

    if k >= n:
        # ARPACK cannot compute all eigenpairs; fall back to the dense
        # solver for explicit matrices.
        warnings.warn("k >= N for N * N square matrix. "
                      "Attempting to use scipy.linalg.eigh instead.",
                      RuntimeWarning, stacklevel=2)

        if issparse(A):
            raise TypeError("Cannot use scipy.linalg.eigh for sparse A with "
                            "k >= N. Use scipy.linalg.eigh(A.toarray()) or"
                            " reduce k.")
        if isinstance(A, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator "
                            "A with k >= N.")
        if isinstance(M, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator "
                            "M with k >= N.")

        return eigh(A, b=M, eigvals_only=not return_eigenvectors)

    if sigma is None:
        A = _aslinearoperator_with_dtype(A)
        matvec = A.matvec

        if OPinv is not None:
            raise ValueError("OPinv should not be specified "
                             "with sigma = None.")
        if M is None:
            #standard eigenvalue problem
            # NOTE: 'mode' below is rebound from the user-facing string to
            # ARPACK's integer mode parameter (1, 2, 3, 4, or 5).
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be "
                                 "specified with M = None.")
        else:
            #general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, hermitian=True, tol=tol)
            else:
                Minv = _aslinearoperator_with_dtype(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = _aslinearoperator_with_dtype(M).matvec
    else:
        # sigma is not None: shift-invert mode
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is")

        # normal mode
        if mode == 'normal':
            mode = 3
            matvec = None
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               hermitian=True, tol=tol)
            else:
                OPinv = _aslinearoperator_with_dtype(OPinv)
                Minv_matvec = OPinv.matvec
            if M is None:
                M_matvec = None
            else:
                M = _aslinearoperator_with_dtype(M)
                M_matvec = M.matvec

        # buckling mode
        elif mode == 'buckling':
            mode = 4
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               hermitian=True, tol=tol)
            else:
                Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
            matvec = _aslinearoperator_with_dtype(A).matvec
            # in buckling mode ARPACK's B operator is A itself, so no
            # separate M matvec is passed
            M_matvec = None

        # cayley-transform mode
        elif mode == 'cayley':
            mode = 5
            matvec = _aslinearoperator_with_dtype(A).matvec
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               hermitian=True, tol=tol)
            else:
                Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
            if M is None:
                M_matvec = None
            else:
                M_matvec = _aslinearoperator_with_dtype(M).matvec

        # unrecognized mode
        else:
            raise ValueError("unrecognized mode '%s'" % mode)

    params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                    M_matvec, Minv_matvec, sigma,
                                    ncv, v0, maxiter, which, tol)

    # ARPACK is not re-entrant (Fortran SAVE state); serialize all calls.
    with _ARPACK_LOCK:
        while not params.converged:
            params.iterate()

    return params.extract(return_eigenvectors)
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (212 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/test_arpack.cpython-310.pyc ADDED
Binary file (17.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py ADDED
@@ -0,0 +1,718 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __usage__ = """
2
+ To run tests locally:
3
+ python tests/test_arpack.py [-l<int>] [-v<int>]
4
+
5
+ """
6
+
7
+ import threading
8
+ import itertools
9
+
10
+ import numpy as np
11
+
12
+ from numpy.testing import assert_allclose, assert_equal, suppress_warnings
13
+ from pytest import raises as assert_raises
14
+ import pytest
15
+
16
+ from numpy import dot, conj, random
17
+ from scipy.linalg import eig, eigh
18
+ from scipy.sparse import csc_matrix, csr_matrix, diags, rand
19
+ from scipy.sparse.linalg import LinearOperator, aslinearoperator
20
+ from scipy.sparse.linalg._eigen.arpack import (eigs, eigsh, arpack,
21
+ ArpackNoConvergence)
22
+
23
+
24
+ from scipy._lib._gcutils import assert_deallocated, IS_PYPY
25
+
26
+
27
+ # precision for tests
28
+ _ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
29
+
30
+
31
+ def _get_test_tolerance(type_char, mattype=None, D_type=None, which=None):
32
+ """
33
+ Return tolerance values suitable for a given test:
34
+
35
+ Parameters
36
+ ----------
37
+ type_char : {'f', 'd', 'F', 'D'}
38
+ Data type in ARPACK eigenvalue problem
39
+ mattype : {csr_matrix, aslinearoperator, asarray}, optional
40
+ Linear operator type
41
+
42
+ Returns
43
+ -------
44
+ tol
45
+ Tolerance to pass to the ARPACK routine
46
+ rtol
47
+ Relative tolerance for outputs
48
+ atol
49
+ Absolute tolerance for outputs
50
+
51
+ """
52
+
53
+ rtol = {'f': 3000 * np.finfo(np.float32).eps,
54
+ 'F': 3000 * np.finfo(np.float32).eps,
55
+ 'd': 2000 * np.finfo(np.float64).eps,
56
+ 'D': 2000 * np.finfo(np.float64).eps}[type_char]
57
+ atol = rtol
58
+ tol = 0
59
+
60
+ if mattype is aslinearoperator and type_char in ('f', 'F'):
61
+ # iterative methods in single precision: worse errors
62
+ # also: bump ARPACK tolerance so that the iterative method converges
63
+ tol = 30 * np.finfo(np.float32).eps
64
+ rtol *= 5
65
+
66
+ if mattype is csr_matrix and type_char in ('f', 'F'):
67
+ # sparse in single precision: worse errors
68
+ rtol *= 5
69
+
70
+ if (
71
+ which in ('LM', 'SM', 'LA')
72
+ and D_type.name == "gen-hermitian-Mc"
73
+ ):
74
+ if type_char == 'F':
75
+ # missing case 1, 2, and more, from PR 14798
76
+ rtol *= 5
77
+
78
+ if type_char == 'D':
79
+ # missing more cases, from PR 14798
80
+ rtol *= 10
81
+ atol *= 10
82
+
83
+ return tol, rtol, atol
84
+
85
+
86
+ def generate_matrix(N, complex_=False, hermitian=False,
87
+ pos_definite=False, sparse=False):
88
+ M = np.random.random((N, N))
89
+ if complex_:
90
+ M = M + 1j * np.random.random((N, N))
91
+
92
+ if hermitian:
93
+ if pos_definite:
94
+ if sparse:
95
+ i = np.arange(N)
96
+ j = np.random.randint(N, size=N-2)
97
+ i, j = np.meshgrid(i, j)
98
+ M[i, j] = 0
99
+ M = np.dot(M.conj(), M.T)
100
+ else:
101
+ M = np.dot(M.conj(), M.T)
102
+ if sparse:
103
+ i = np.random.randint(N, size=N * N // 4)
104
+ j = np.random.randint(N, size=N * N // 4)
105
+ ind = np.nonzero(i == j)
106
+ j[ind] = (j[ind] + 1) % N
107
+ M[i, j] = 0
108
+ M[j, i] = 0
109
+ else:
110
+ if sparse:
111
+ i = np.random.randint(N, size=N * N // 2)
112
+ j = np.random.randint(N, size=N * N // 2)
113
+ M[i, j] = 0
114
+ return M
115
+
116
+
117
def generate_matrix_symmetric(N, pos_definite=False, sparse=False):
    """Build a random symmetric ``N x N`` matrix.

    With ``sparse=True`` the result is a CSR matrix; with
    ``pos_definite=True`` a shift of ``N * I`` is added, which guarantees
    positive definiteness since entries of the symmetrized matrix lie
    in ``[0, 1)``.
    """
    mat = np.random.random((N, N))
    mat = 0.5 * (mat + mat.T)  # symmetrize

    if sparse:
        mat = csr_matrix(mat)
    if pos_definite:
        mat += N * np.eye(N)

    return mat
132
+
133
+
134
def assert_allclose_cc(actual, desired, **kwargs):
    """Check ``actual`` is close to ``desired`` or to its complex conjugate."""
    try:
        assert_allclose(actual, desired, **kwargs)
    except AssertionError:
        # complex eigenpairs may be returned conjugated; accept that too
        assert_allclose(actual, conj(desired), **kwargs)
140
+
141
+
142
def argsort_which(eigenvalues, typ, k, which,
                  sigma=None, OPpart=None, mode=None):
    """Sorted indices of ``eigenvalues`` following the eigs/eigsh ``which`` rule.

    The values are first mapped through the spectral transformation implied
    by ``sigma``/``OPpart``/``mode`` (if any), rounded to the comparison
    precision for ``typ``, and then ranked according to ``which``.
    """
    # apply the shift-invert / buckling / cayley transformation, if any
    if sigma is None:
        transformed = eigenvalues
    elif mode is None or mode == 'normal':
        if OPpart is None:
            transformed = 1. / (eigenvalues - sigma)
        elif OPpart == 'r':
            transformed = 0.5 * (1. / (eigenvalues - sigma)
                                 + 1. / (eigenvalues - np.conj(sigma)))
        elif OPpart == 'i':
            transformed = -0.5j * (1. / (eigenvalues - sigma)
                                   - 1. / (eigenvalues - np.conj(sigma)))
    elif mode == 'cayley':
        transformed = (eigenvalues + sigma) / (eigenvalues - sigma)
    elif mode == 'buckling':
        transformed = eigenvalues / (eigenvalues - sigma)
    else:
        raise ValueError("mode='%s' not recognized" % mode)

    reval = np.round(transformed, decimals=_ndigits[typ])

    # rank according to the requested criterion
    if which in ['LM', 'SM']:
        ind = np.argsort(abs(reval))
    elif which in ['LR', 'SR', 'LA', 'SA', 'BE']:
        ind = np.argsort(np.real(reval))
    elif which in ['LI', 'SI']:
        # for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
        if typ.islower():
            ind = np.argsort(abs(np.imag(reval)))
        else:
            ind = np.argsort(np.imag(reval))
    else:
        raise ValueError("which='%s' is unrecognized" % which)

    # pick the top, the bottom, or both ends of the ranking
    if which in ['LM', 'LA', 'LR', 'LI']:
        return ind[-k:]
    if which in ['SM', 'SA', 'SR', 'SI']:
        return ind[:k]
    if which == 'BE':
        return np.concatenate((ind[:k//2], ind[k//2-k:]))
186
+
187
+
188
def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
              mattype=np.asarray, OPpart=None, mode='normal'):
    """Run eigs/eigsh on test case `d` and compare against exact eigenpairs.

    Parameters
    ----------
    symmetric : bool
        If True use ``eigsh``, otherwise ``eigs``.
    d : dict
        Test case with keys 'mat', 'eval', and optionally 'bmat'
        (presence of 'bmat' marks a generalized problem).
    typ : {'f', 'd', 'F', 'D'}
        Data type character for the computation.
    k : int
        Number of eigenpairs to request.
    which, v0, sigma, OPpart, mode
        Passed through to the ARPACK routine.
    mattype : callable
        Wrapper applied to the matrices (e.g. csr_matrix, aslinearoperator).
    """
    general = ('bmat' in d)

    if symmetric:
        eigs_func = eigsh
    else:
        eigs_func = eigs

    # diagnostic message identifying the failing configuration
    if general:
        err = ("error for {}:general, typ={}, which={}, sigma={}, "
               "mattype={}, OPpart={}, mode={}".format(eigs_func.__name__,
                                                       typ, which, sigma,
                                                       mattype.__name__,
                                                       OPpart, mode))
    else:
        err = ("error for {}:standard, typ={}, which={}, sigma={}, "
               "mattype={}, OPpart={}, mode={}".format(eigs_func.__name__,
                                                       typ, which, sigma,
                                                       mattype.__name__,
                                                       OPpart, mode))

    a = d['mat'].astype(typ)
    ac = mattype(a)

    if general:
        b = d['bmat'].astype(typ)
        bc = mattype(b)

    # get exact eigenvalues
    exact_eval = d['eval'].astype(typ.upper())
    ind = argsort_which(exact_eval, typ, k, which,
                        sigma, OPpart, mode)
    exact_eval = exact_eval[ind]

    # compute arpack eigenvalues
    kwargs = dict(which=which, v0=v0, sigma=sigma)
    if eigs_func is eigsh:
        kwargs['mode'] = mode
    else:
        kwargs['OPpart'] = OPpart

    # compute suitable tolerances
    kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype, d, which)
    # on rare occasions, ARPACK routines return results that are proper
    # eigenvalues and -vectors, but not necessarily the ones requested in
    # the parameter which. This is inherent to the Krylov methods, and
    # should not be treated as a failure. If such a rare situation
    # occurs, the calculation is tried again (but at most a few times).
    ntries = 0
    while ntries < 5:
        # solve; on non-convergence, retry once with a generous maxiter
        if general:
            try:
                eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
            except ArpackNoConvergence:
                kwargs['maxiter'] = 20*a.shape[0]
                eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
        else:
            try:
                eigenvalues, evec = eigs_func(ac, k, **kwargs)
            except ArpackNoConvergence:
                kwargs['maxiter'] = 20*a.shape[0]
                eigenvalues, evec = eigs_func(ac, k, **kwargs)

        # order the computed pairs the same way as the exact ones
        ind = argsort_which(eigenvalues, typ, k, which,
                            sigma, OPpart, mode)
        eigenvalues = eigenvalues[ind]
        evec = evec[:, ind]

        try:
            # check eigenvalues
            assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol,
                               err_msg=err)
            check_evecs = True
        except AssertionError:
            # not the requested eigenvalues -- count as a retry, not failure
            check_evecs = False
            ntries += 1

        if check_evecs:
            # check eigenvectors via the residual A v = lambda (B) v
            LHS = np.dot(a, evec)
            if general:
                RHS = eigenvalues * np.dot(b, evec)
            else:
                RHS = eigenvalues * evec

            assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err)
            break

    # check eigenvalues (raises with the diagnostic message if retries ran out)
    assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol, err_msg=err)
280
+
281
+
282
class DictWithRepr(dict):
    """A dict that carries a ``name`` used as its printable representation."""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return f"<{self.name}>"
288
+
289
+
290
class SymmetricParams:
    """Test-problem collection for the symmetric/Hermitian solver ``eigsh``.

    Builds small (6x6) standard and generalized eigenproblems with exact
    reference eigenvalues computed by ``scipy.linalg.eigh``.

    Attributes
    ----------
    eigs : callable
        The solver under test (``eigsh``).
    which : list of str
        ``which`` keywords valid for the symmetric solver.
    mattypes : list of callables
        Operator wrappers to test (sparse, LinearOperator, dense).
    sigmas_modes : dict
        Maps shift ``sigma`` to the list of shift-invert modes to try.
    real_test_cases, complex_test_cases : list of DictWithRepr
        The assembled problems.
    """

    def __init__(self):
        self.eigs = eigsh
        self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
        self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
        self.sigmas_modes = {None: ['normal'],
                             0.5: ['normal', 'buckling', 'cayley']}

        # generate matrices
        # these should all be float32 so that the eigenvalues
        # are the same in float32 and float64
        N = 6
        np.random.seed(2300)
        Ar = generate_matrix(N, hermitian=True,
                             pos_definite=True).astype('f').astype('d')
        M = generate_matrix(N, hermitian=True,
                            pos_definite=True).astype('f').astype('d')
        Ac = generate_matrix(N, hermitian=True, pos_definite=True,
                             complex_=True).astype('F').astype('D')
        Mc = generate_matrix(N, hermitian=True, pos_definite=True,
                             complex_=True).astype('F').astype('D')
        v0 = np.random.random(N)

        # standard symmetric problem
        SS = DictWithRepr("std-symmetric")
        SS['mat'] = Ar
        SS['v0'] = v0
        SS['eval'] = eigh(SS['mat'], eigvals_only=True)

        # general symmetric problem
        GS = DictWithRepr("gen-symmetric")
        GS['mat'] = Ar
        GS['bmat'] = M
        GS['v0'] = v0
        GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)

        # standard hermitian problem
        SH = DictWithRepr("std-hermitian")
        SH['mat'] = Ac
        SH['v0'] = v0
        SH['eval'] = eigh(SH['mat'], eigvals_only=True)

        # general hermitian problem
        GH = DictWithRepr("gen-hermitian")
        GH['mat'] = Ac
        GH['bmat'] = M
        GH['v0'] = v0
        GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)

        # general hermitian problem with hermitian M
        GHc = DictWithRepr("gen-hermitian-Mc")
        GHc['mat'] = Ac
        GHc['bmat'] = Mc
        GHc['v0'] = v0
        GHc['eval'] = eigh(GHc['mat'], GHc['bmat'], eigvals_only=True)

        self.real_test_cases = [SS, GS]
        self.complex_test_cases = [SH, GH, GHc]
348
+
349
+
350
class NonSymmetricParams:
    """Test-problem collection for the general (nonsymmetric) solver ``eigs``.

    Builds small (6x6) standard and generalized eigenproblems with exact
    reference eigenvalues computed by ``scipy.linalg.eig``.

    Attributes
    ----------
    eigs : callable
        The solver under test (``eigs``).
    which : list of str
        ``which`` keywords exercised (a subset; see inline comment).
    mattypes : list of callables
        Operator wrappers to test (sparse, LinearOperator, dense).
    sigmas_OPparts : dict
        Maps shift ``sigma`` to the list of ``OPpart`` values to try.
    real_test_cases, complex_test_cases : list of DictWithRepr
        The assembled problems.
    """

    def __init__(self):
        self.eigs = eigs
        self.which = ['LM', 'LR', 'LI']  # , 'SM', 'LR', 'SR', 'LI', 'SI']
        self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
        self.sigmas_OPparts = {None: [None],
                               0.1: ['r'],
                               0.1 + 0.1j: ['r', 'i']}

        # generate matrices
        # these should all be float32 so that the eigenvalues
        # are the same in float32 and float64
        N = 6
        np.random.seed(2300)
        Ar = generate_matrix(N).astype('f').astype('d')
        M = generate_matrix(N, hermitian=True,
                            pos_definite=True).astype('f').astype('d')
        Ac = generate_matrix(N, complex_=True).astype('F').astype('D')
        v0 = np.random.random(N)

        # standard real nonsymmetric problem
        SNR = DictWithRepr("std-real-nonsym")
        SNR['mat'] = Ar
        SNR['v0'] = v0
        SNR['eval'] = eig(SNR['mat'], left=False, right=False)

        # general real nonsymmetric problem
        GNR = DictWithRepr("gen-real-nonsym")
        GNR['mat'] = Ar
        GNR['bmat'] = M
        GNR['v0'] = v0
        GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False)

        # standard complex nonsymmetric problem
        SNC = DictWithRepr("std-cmplx-nonsym")
        SNC['mat'] = Ac
        SNC['v0'] = v0
        SNC['eval'] = eig(SNC['mat'], left=False, right=False)

        # general complex nonsymmetric problem
        GNC = DictWithRepr("gen-cmplx-nonsym")
        GNC['mat'] = Ac
        GNC['bmat'] = M
        GNC['v0'] = v0
        GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False)

        self.real_test_cases = [SNR, GNR]
        self.complex_test_cases = [SNC, GNC]
398
+
399
+
400
def test_symmetric_modes():
    """Exercise eigsh over every real-symmetric test configuration."""
    params = SymmetricParams()
    cases = itertools.product(params.real_test_cases, 'fd', params.which,
                              params.mattypes, params.sigmas_modes.items())
    for D, typ, which, mattype, (sigma, modes) in cases:
        for mode in modes:
            eval_evec(True, D, typ, 2, which,
                      None, sigma, mattype, None, mode)
412
+
413
+
414
def test_hermitian_modes():
    """Exercise eigsh on the complex (Hermitian) test problems."""
    params = SymmetricParams()
    # BE invalid for complex
    valid_which = [w for w in params.which if w != 'BE']
    for D, typ, which, mattype, sigma in itertools.product(
            params.complex_test_cases, 'FD', valid_which,
            params.mattypes, params.sigmas_modes):
        eval_evec(True, D, typ, 2, which, None, sigma, mattype)
427
+
428
+
429
def test_symmetric_starting_vector():
    """Check eigsh with an explicit random starting vector for several k."""
    params = SymmetricParams()
    for k, D, typ in itertools.product([1, 2, 3, 4, 5],
                                       params.real_test_cases, 'fd'):
        v0 = random.rand(len(D['v0'])).astype(typ)
        eval_evec(True, D, typ, k, 'LM', v0)
437
+
438
+
439
def test_symmetric_no_convergence():
    """A too-small maxiter must raise ArpackNoConvergence that carries
    the eigenpairs found so far; those pairs must still be accurate."""
    np.random.seed(1234)
    m = generate_matrix(30, hermitian=True, pos_definite=True)
    tol, rtol, atol = _get_test_tolerance('d')
    try:
        w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol, ncv=9)
    except ArpackNoConvergence as err:
        if len(err.eigenvalues) <= 0:
            raise AssertionError("Spurious no-eigenvalues-found case") from err
        w, v = err.eigenvalues, err.eigenvectors
        # the partial eigenpairs must satisfy the eigen-equation
        assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
    else:
        raise AssertionError("Spurious no-error exit")
452
+
453
+
454
def test_real_nonsymmetric_modes():
    """Exercise eigs on the real nonsymmetric test problems."""
    params = NonSymmetricParams()
    for D, typ, which, mattype in itertools.product(
            params.real_test_cases, 'fd', params.which, params.mattypes):
        for sigma, OPparts in params.sigmas_OPparts.items():
            for OPpart in OPparts:
                eval_evec(False, D, typ, 2, which,
                          None, sigma, mattype, OPpart)
466
+
467
+
468
def test_complex_nonsymmetric_modes():
    """Exercise eigs on the complex nonsymmetric test problems."""
    params = NonSymmetricParams()
    for D, typ, which, mattype, sigma in itertools.product(
            params.complex_test_cases, 'DF', params.which,
            params.mattypes, params.sigmas_OPparts):
        eval_evec(False, D, typ, 2, which, None, sigma, mattype)
479
+
480
+
481
def test_standard_nonsymmetric_starting_vector():
    """eigs with a user-supplied random starting vector, standard problem."""
    params = NonSymmetricParams()
    for k, d, typ in itertools.product([1, 2, 3, 4],
                                       params.complex_test_cases, 'FD'):
        n = d['mat'].shape[0]
        v0 = random.rand(n).astype(typ)
        eval_evec(False, d, typ, k, "LM", v0, None)
492
+
493
+
494
def test_general_nonsymmetric_starting_vector():
    """eigs with a user-supplied random starting vector, general problem."""
    params = NonSymmetricParams()
    for k, d, typ in itertools.product([1, 2, 3, 4],
                                       params.complex_test_cases, 'FD'):
        n = d['mat'].shape[0]
        v0 = random.rand(n).astype(typ)
        eval_evec(False, d, typ, k, "LM", v0, None)
505
+
506
+
507
def test_standard_nonsymmetric_no_convergence():
    """Non-convergence of eigs must surface partial -- yet accurate -- pairs."""
    np.random.seed(1234)
    m = generate_matrix(30, complex_=True)
    tol, rtol, atol = _get_test_tolerance('d')
    try:
        w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
    except ArpackNoConvergence as err:
        if len(err.eigenvalues) <= 0:
            raise AssertionError("Spurious no-eigenvalues-found case") from err
        w, v = err.eigenvalues, err.eigenvectors
        # each partial eigenpair must satisfy the eigen-equation
        for ww, vv in zip(w, v.T):
            assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol)
    else:
        raise AssertionError("Spurious no-error exit")
521
+
522
+
523
def test_eigen_bad_shapes():
    """eigs must reject a non-square operator with ValueError."""
    rectangular = csc_matrix(np.zeros((2, 3)))
    assert_raises(ValueError, eigs, rectangular)
527
+
528
+
529
def test_eigen_bad_kwargs():
    """An unknown ``which`` keyword value must raise ValueError."""
    operator = csc_matrix(np.zeros((8, 8)))
    assert_raises(ValueError, eigs, operator, which='XX')
533
+
534
+
535
def test_ticket_1459_arpack_crash():
    """Regression test for ticket 1459: eigs used to crash for this v0."""
    # the specific starting vector that triggered the crash
    crash_v0 = [-0.71063568258907849895, -0.83185111795729227424,
                -0.34365925382227402451, 0.46122533684552280420,
                -0.58001341115969040629, -0.78844877570084292984e-01]
    for dtype in [np.float32, np.float64]:
        # This test does not seem to catch the issue for float32,
        # but we made the same fix there, just to be sure
        np.random.seed(2301)
        A = np.random.random((6, 6)).astype(dtype)
        v0 = np.array(crash_v0, dtype=dtype)

        # Should not crash:
        evals, evecs = eigs(A, 2, v0=v0)
552
+
553
+
554
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_linearoperator_deallocation():
    # Check that the linear operators used by the Arpack wrappers are
    # deallocatable by reference counting -- they are big objects, so
    # Python's cyclic GC may not collect them fast enough before
    # running out of memory if eigs/eigsh are called in a tight loop.

    M_d = np.eye(10)              # dense operator
    M_s = csc_matrix(M_d)         # sparse operator
    M_o = aslinearoperator(M_d)   # LinearOperator wrapper

    # each context asserts the constructed object is freed by refcounting
    # alone (no cyclic GC needed) when the last reference goes away
    with assert_deallocated(lambda: arpack.SpLuInv(M_s)):
        pass
    with assert_deallocated(lambda: arpack.LuInv(M_d)):
        pass
    with assert_deallocated(lambda: arpack.IterInv(M_s)):
        pass
    with assert_deallocated(lambda: arpack.IterOpInv(M_o, None, 0.3)):
        pass
    with assert_deallocated(lambda: arpack.IterOpInv(M_o, M_o, 0.3)):
        pass
575
+
576
def test_parallel_threads():
    """eigs/eigsh must be callable concurrently from several threads."""
    results = []
    v0 = np.random.rand(50)

    def solve_both():
        # a fixed tridiagonal operator so every call sees the same problem
        op = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
        w, _ = eigs(op, k=3, v0=v0)
        results.append(w)

        w, _ = eigsh(op, k=3, v0=v0)
        results.append(w)

    threads = [threading.Thread(target=solve_both) for _ in range(10)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    # also run once on the main thread as the reference
    solve_both()

    for r in results:
        assert_allclose(r, results[-1])
598
+
599
+
600
def test_reentering():
    """Recursive eigs calls must fail gracefully, not crash.

    The underlying Fortran code is not reentrant, so a matvec that itself
    calls eigs must raise RuntimeError.
    """
    def recursive_matvec(x):
        inner = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
        w, v = eigs(inner, k=1)
        return v / w[0]

    A = LinearOperator(matvec=recursive_matvec, dtype=float, shape=(50, 50))

    assert_raises(RuntimeError, eigs, A, k=1)
    assert_raises(RuntimeError, eigsh, A, k=1)
611
+
612
+
613
def test_regression_arpackng_1315():
    # Check that issue arpack-ng/#1315 is not present.
    # Adapted from arpack-ng/TESTS/bug_1315_single.c
    # If this fails, then the installed ARPACK library is faulty.
    for dtype in (np.float32, np.float64):
        np.random.seed(1234)

        # diagonal operator with known spectrum 1..1000
        spectrum = np.arange(1, 1000 + 1).astype(dtype)
        A = diags([spectrum], [0], shape=(1000, 1000))

        v0 = np.random.rand(1000).astype(dtype)
        w, v = eigs(A, k=9, ncv=2 * 9 + 1, which="LM", v0=v0)

        # the nine largest diagonal entries must be recovered
        assert_allclose(np.sort(w), np.sort(spectrum[-9:]),
                        rtol=1e-4)
629
+
630
+
631
def test_eigs_for_k_greater():
    """For k at or beyond the ARPACK limit, eigs falls back to dense eig."""
    A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4))  # sparse
    A = generate_matrix(4, sparse=False)
    M_dense = np.random.random((4, 4))
    M_sparse = generate_matrix(4, sparse=True)
    M_linop = aslinearoperator(M_dense)
    dense_expected = eig(A, b=M_dense)
    sparse_expected = eig(A, b=M_sparse)

    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning)

        # dense fallback must reproduce scipy.linalg.eig exactly
        for k in (3, 4, 5):
            assert_equal(eigs(A, M=M_dense, k=k), dense_expected)
        assert_equal(eigs(A, M=M_sparse, k=5), sparse_expected)

        # M as LinearOperator is not supported by the dense fallback
        assert_raises(TypeError, eigs, A, M=M_linop, k=3)

        # nor are LinearOperator / sparse 'A' inputs
        assert_raises(TypeError, eigs, aslinearoperator(A), k=3)
        assert_raises(TypeError, eigs, A_sparse, k=3)
656
+
657
def test_eigsh_for_k_greater():
    """For k at or beyond the ARPACK limit, eigsh falls back to dense eigh."""
    A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4))  # sparse
    A = generate_matrix(4, sparse=False)
    M_dense = generate_matrix_symmetric(4, pos_definite=True)
    M_sparse = generate_matrix_symmetric(4, pos_definite=True, sparse=True)
    M_linop = aslinearoperator(M_dense)
    dense_expected = eigh(A, b=M_dense)
    sparse_expected = eigh(A, b=M_sparse)

    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning)

        # dense fallback must reproduce scipy.linalg.eigh exactly
        for k in (4, 5):
            assert_equal(eigsh(A, M=M_dense, k=k), dense_expected)
        assert_equal(eigsh(A, M=M_sparse, k=5), sparse_expected)

        # M as LinearOperator is not supported by the dense fallback
        assert_raises(TypeError, eigsh, A, M=M_linop, k=4)

        # nor are LinearOperator / sparse 'A' inputs
        assert_raises(TypeError, eigsh, aslinearoperator(A), k=4)
        assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4)
680
+
681
+
682
+ def test_real_eigs_real_k_subset():
683
+ np.random.seed(1)
684
+
685
+ n = 10
686
+ A = rand(n, n, density=0.5)
687
+ A.data *= 2
688
+ A.data -= 1
689
+
690
+ v0 = np.ones(n)
691
+
692
+ whichs = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
693
+ dtypes = [np.float32, np.float64]
694
+
695
+ for which, sigma, dtype in itertools.product(whichs, [None, 0, 5], dtypes):
696
+ prev_w = np.array([], dtype=dtype)
697
+ eps = np.finfo(dtype).eps
698
+ for k in range(1, 9):
699
+ w, z = eigs(A.astype(dtype), k=k, which=which, sigma=sigma,
700
+ v0=v0.astype(dtype), tol=0)
701
+ assert_allclose(np.linalg.norm(A.dot(z) - z * w), 0, atol=np.sqrt(eps))
702
+
703
+ # Check that the set of eigenvalues for `k` is a subset of that for `k+1`
704
+ dist = abs(prev_w[:,None] - w).min(axis=1)
705
+ assert_allclose(dist, 0, atol=np.sqrt(eps))
706
+
707
+ prev_w = w
708
+
709
+ # Check sort order
710
+ if sigma is None:
711
+ d = w
712
+ else:
713
+ d = 1 / (w - sigma)
714
+
715
+ if which == 'LM':
716
+ # ARPACK is systematic for 'LM', but sort order
717
+ # appears not well defined for other modes
718
+ assert np.all(np.diff(abs(d)) <= 1e-6)
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__init__.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
3
+
4
+ LOBPCG is a preconditioned eigensolver for large symmetric positive definite
5
+ (SPD) generalized eigenproblems.
6
+
7
+ Call the function lobpcg - see help for lobpcg.lobpcg.
8
+
9
+ """
10
+ from .lobpcg import *
11
+
12
+ __all__ = [s for s in dir() if not s.startswith('_')]
13
+
14
+ from scipy._lib._testutils import PytestTester
15
+ test = PytestTester(__name__)
16
+ del PytestTester
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (739 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/lobpcg.cpython-310.pyc ADDED
Binary file (25.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py ADDED
@@ -0,0 +1,1112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
3
+
4
+ References
5
+ ----------
6
+ .. [1] A. V. Knyazev (2001),
7
+ Toward the Optimal Preconditioned Eigensolver: Locally Optimal
8
+ Block Preconditioned Conjugate Gradient Method.
9
+ SIAM Journal on Scientific Computing 23, no. 2,
10
+ pp. 517-541. :doi:`10.1137/S1064827500366124`
11
+
12
+ .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
13
+ Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
14
+ in hypre and PETSc. :arxiv:`0705.2626`
15
+
16
+ .. [3] A. V. Knyazev's C and MATLAB implementations:
17
+ https://github.com/lobpcg/blopex
18
+ """
19
+
20
+ import warnings
21
+ import numpy as np
22
+ from scipy.linalg import (inv, eigh, cho_factor, cho_solve,
23
+ cholesky, LinAlgError)
24
+ from scipy.sparse.linalg import LinearOperator
25
+ from scipy.sparse import issparse
26
+
27
+ __all__ = ["lobpcg"]
28
+
29
+
30
+ def _report_nonhermitian(M, name):
31
+ """
32
+ Report if `M` is not a Hermitian matrix given its type.
33
+ """
34
+ from scipy.linalg import norm
35
+
36
+ md = M - M.T.conj()
37
+ nmd = norm(md, 1)
38
+ tol = 10 * np.finfo(M.dtype).eps
39
+ tol = max(tol, tol * norm(M, 1))
40
+ if nmd > tol:
41
+ warnings.warn(
42
+ f"Matrix {name} of the type {M.dtype} is not Hermitian: "
43
+ f"condition: {nmd} < {tol} fails.",
44
+ UserWarning, stacklevel=4
45
+ )
46
+
47
+ def _as2d(ar):
48
+ """
49
+ If the input array is 2D return it, if it is 1D, append a dimension,
50
+ making it a column vector.
51
+ """
52
+ if ar.ndim == 2:
53
+ return ar
54
+ else: # Assume 1!
55
+ aux = np.asarray(ar)
56
+ aux.shape = (ar.shape[0], 1)
57
+ return aux
58
+
59
+
60
+ def _makeMatMat(m):
61
+ if m is None:
62
+ return None
63
+ elif callable(m):
64
+ return lambda v: m(v)
65
+ else:
66
+ return lambda v: m @ v
67
+
68
+
69
+ def _matmul_inplace(x, y, verbosityLevel=0):
70
+ """Perform 'np.matmul' in-place if possible.
71
+
72
+ If some sufficient conditions for inplace matmul are met, do so.
73
+ Otherwise try inplace update and fall back to overwrite if that fails.
74
+ """
75
+ if x.flags["CARRAY"] and x.shape[1] == y.shape[1] and x.dtype == y.dtype:
76
+ # conditions where we can guarantee that inplace updates will work;
77
+ # i.e. x is not a view/slice, x & y have compatible dtypes, and the
78
+ # shape of the result of x @ y matches the shape of x.
79
+ np.matmul(x, y, out=x)
80
+ else:
81
+ # ideally, we'd have an exhaustive list of conditions above when
82
+ # inplace updates are possible; since we don't, we opportunistically
83
+ # try if it works, and fall back to overwriting if necessary
84
+ try:
85
+ np.matmul(x, y, out=x)
86
+ except Exception:
87
+ if verbosityLevel:
88
+ warnings.warn(
89
+ "Inplace update of x = x @ y failed, "
90
+ "x needs to be overwritten.",
91
+ UserWarning, stacklevel=3
92
+ )
93
+ x = x @ y
94
+ return x
95
+
96
+
97
+ def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):
98
+ """Changes blockVectorV in-place."""
99
+ YBV = blockVectorBY.T.conj() @ blockVectorV
100
+ tmp = cho_solve(factYBY, YBV)
101
+ blockVectorV -= blockVectorY @ tmp
102
+
103
+
104
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None,
                      verbosityLevel=0):
    """in-place B-orthonormalize the given block vector using Cholesky.

    Returns ``(V, BV, R)`` where ``R`` is the inverse Cholesky factor
    applied to both blocks, or ``(None, None, None)`` if the secondary
    matmul or the Cholesky factorization fails.
    """
    if blockVectorBV is None:
        if B is None:
            # identity B: the B-product is the block itself (aliased)
            blockVectorBV = blockVectorV
        else:
            try:
                blockVectorBV = B(blockVectorV)
            except Exception as e:
                if verbosityLevel:
                    warnings.warn(
                        f"Secondary MatMul call failed with error\n"
                        f"{e}\n",
                        UserWarning, stacklevel=3
                    )
                return None, None, None
            if blockVectorBV.shape != blockVectorV.shape:
                raise ValueError(
                    f"The shape {blockVectorV.shape} "
                    f"of the orthogonalized matrix not preserved\n"
                    f"and changed to {blockVectorBV.shape} "
                    f"after multiplying by the secondary matrix.\n"
                )

    VBV = blockVectorV.T.conj() @ blockVectorBV
    try:
        # VBV is a Cholesky factor from now on...
        VBV = cholesky(VBV, overwrite_a=True)
        VBV = inv(VBV, overwrite_a=True)
        blockVectorV = _matmul_inplace(
            blockVectorV, VBV,
            verbosityLevel=verbosityLevel
        )
        if B is not None:
            blockVectorBV = _matmul_inplace(
                blockVectorBV, VBV,
                verbosityLevel=verbosityLevel
            )
        return blockVectorV, blockVectorBV, VBV
    except LinAlgError:
        if verbosityLevel:
            warnings.warn(
                "Cholesky has failed.",
                UserWarning, stacklevel=3
            )
        return None, None, None
151
+
152
+
153
+ def _get_indx(_lambda, num, largest):
154
+ """Get `num` indices into `_lambda` depending on `largest` option."""
155
+ ii = np.argsort(_lambda)
156
+ if largest:
157
+ ii = ii[:-num - 1:-1]
158
+ else:
159
+ ii = ii[:num]
160
+
161
+ return ii
162
+
163
+
164
def _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel):
    """When verbose, warn about any non-Hermitian Gram matrices."""
    if not verbosityLevel:
        return
    _report_nonhermitian(gramA, "gramA")
    _report_nonhermitian(gramB, "gramB")
169
+
170
+ def lobpcg(
171
+ A,
172
+ X,
173
+ B=None,
174
+ M=None,
175
+ Y=None,
176
+ tol=None,
177
+ maxiter=None,
178
+ largest=True,
179
+ verbosityLevel=0,
180
+ retLambdaHistory=False,
181
+ retResidualNormsHistory=False,
182
+ restartControl=20,
183
+ ):
184
+ """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
185
+
186
+ LOBPCG is a preconditioned eigensolver for large real symmetric and complex
187
+ Hermitian definite generalized eigenproblems.
188
+
189
+ Parameters
190
+ ----------
191
+ A : {sparse matrix, ndarray, LinearOperator, callable object}
192
+ The Hermitian linear operator of the problem, usually given by a
193
+ sparse matrix. Often called the "stiffness matrix".
194
+ X : ndarray, float32 or float64
195
+ Initial approximation to the ``k`` eigenvectors (non-sparse).
196
+ If `A` has ``shape=(n,n)`` then `X` must have ``shape=(n,k)``.
197
+ B : {sparse matrix, ndarray, LinearOperator, callable object}
198
+ Optional. By default ``B = None``, which is equivalent to identity.
199
+ The right hand side operator in a generalized eigenproblem if present.
200
+ Often called the "mass matrix". Must be Hermitian positive definite.
201
+ M : {sparse matrix, ndarray, LinearOperator, callable object}
202
+ Optional. By default ``M = None``, which is equivalent to identity.
203
+ Preconditioner aiming to accelerate convergence.
204
+ Y : ndarray, float32 or float64, default: None
205
+ An ``n-by-sizeY`` ndarray of constraints with ``sizeY < n``.
206
+ The iterations will be performed in the ``B``-orthogonal complement
207
+ of the column-space of `Y`. `Y` must be full rank if present.
208
+ tol : scalar, optional
209
+ The default is ``tol=n*sqrt(eps)``.
210
+ Solver tolerance for the stopping criterion.
211
+ maxiter : int, default: 20
212
+ Maximum number of iterations.
213
+ largest : bool, default: True
214
+ When True, solve for the largest eigenvalues, otherwise the smallest.
215
+ verbosityLevel : int, optional
216
+ By default ``verbosityLevel=0`` no output.
217
+ Controls the solver standard/screen output.
218
+ retLambdaHistory : bool, default: False
219
+ Whether to return iterative eigenvalue history.
220
+ retResidualNormsHistory : bool, default: False
221
+ Whether to return iterative history of residual norms.
222
+ restartControl : int, optional.
223
+ Iterations restart if the residuals jump ``2**restartControl`` times
224
+ compared to the smallest recorded in ``retResidualNormsHistory``.
225
+ The default is ``restartControl=20``, making the restarts rare for
226
+ backward compatibility.
227
+
228
+ Returns
229
+ -------
230
+ lambda : ndarray of the shape ``(k, )``.
231
+ Array of ``k`` approximate eigenvalues.
232
+ v : ndarray of the same shape as ``X.shape``.
233
+ An array of ``k`` approximate eigenvectors.
234
+ lambdaHistory : ndarray, optional.
235
+ The eigenvalue history, if `retLambdaHistory` is ``True``.
236
+ ResidualNormsHistory : ndarray, optional.
237
+ The history of residual norms, if `retResidualNormsHistory`
238
+ is ``True``.
239
+
240
+ Notes
241
+ -----
242
+ The iterative loop runs ``maxit=maxiter`` (20 if ``maxit=None``)
243
+ iterations at most and finishes earlier if the tolerance is met.
244
+ Breaking backward compatibility with the previous version, LOBPCG
245
+ now returns the block of iterative vectors with the best accuracy rather
246
+ than the last one iterated, as a cure for possible divergence.
247
+
248
+ If ``X.dtype == np.float32`` and user-provided operations/multiplications
249
+ by `A`, `B`, and `M` all preserve the ``np.float32`` data type,
250
+ all the calculations and the output are in ``np.float32``.
251
+
252
+ The size of the iteration history output equals the number of the best
253
+ (limited by `maxit`) iterations plus 3: initial, final, and postprocessing.
254
+
255
+ If both `retLambdaHistory` and `retResidualNormsHistory` are ``True``,
256
+ the return tuple has the following format
257
+ ``(lambda, V, lambda history, residual norms history)``.
258
+
259
+ In the following ``n`` denotes the matrix size and ``k`` the number
260
+ of required eigenvalues (smallest or largest).
261
+
262
+ The LOBPCG code internally solves eigenproblems of the size ``3k`` on every
263
+ iteration by calling the dense eigensolver `eigh`, so if ``k`` is not
264
+ small enough compared to ``n``, it makes no sense to call the LOBPCG code.
265
+ Moreover, if one calls the LOBPCG algorithm for ``5k > n``, it would likely
266
+ break internally, so the code calls the standard function `eigh` instead.
267
+ It is not that ``n`` should be large for the LOBPCG to work, but rather the
268
+ ratio ``n / k`` should be large. If you call LOBPCG with ``k=1``
269
+ and ``n=10``, it works though ``n`` is small. The method is intended
270
+ for extremely large ``n / k``.
271
+
272
+ The convergence speed depends basically on three factors:
273
+
274
+ 1. Quality of the initial approximations `X` to the seeking eigenvectors.
275
+ Randomly distributed around the origin vectors work well if no better
276
+ choice is known.
277
+
278
+ 2. Relative separation of the desired eigenvalues from the rest
279
+ of the eigenvalues. One can vary ``k`` to improve the separation.
280
+
281
+ 3. Proper preconditioning to shrink the spectral spread.
282
+ For example, a rod vibration test problem (under tests
283
+ directory) is ill-conditioned for large ``n``, so convergence will be
284
+ slow, unless efficient preconditioning is used. For this specific
285
+ problem, a good simple preconditioner function would be a linear solve
286
+ for `A`, which is easy to code since `A` is tridiagonal.
287
+
288
+ References
289
+ ----------
290
+ .. [1] A. V. Knyazev (2001),
291
+ Toward the Optimal Preconditioned Eigensolver: Locally Optimal
292
+ Block Preconditioned Conjugate Gradient Method.
293
+ SIAM Journal on Scientific Computing 23, no. 2,
294
+ pp. 517-541. :doi:`10.1137/S1064827500366124`
295
+
296
+ .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
297
+ (2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
298
+ (BLOPEX) in hypre and PETSc. :arxiv:`0705.2626`
299
+
300
+ .. [3] A. V. Knyazev's C and MATLAB implementations:
301
+ https://github.com/lobpcg/blopex
302
+
303
+ Examples
304
+ --------
305
+ Our first example is minimalistic - find the largest eigenvalue of
306
+ a diagonal matrix by solving the non-generalized eigenvalue problem
307
+ ``A x = lambda x`` without constraints or preconditioning.
308
+
309
+ >>> import numpy as np
310
+ >>> from scipy.sparse import spdiags
311
+ >>> from scipy.sparse.linalg import LinearOperator, aslinearoperator
312
+ >>> from scipy.sparse.linalg import lobpcg
313
+
314
+ The square matrix size is
315
+
316
+ >>> n = 100
317
+
318
+ and its diagonal entries are 1, ..., 100 defined by
319
+
320
+ >>> vals = np.arange(1, n + 1).astype(np.int16)
321
+
322
+ The first mandatory input parameter in this test is
323
+ the sparse diagonal matrix `A`
324
+ of the eigenvalue problem ``A x = lambda x`` to solve.
325
+
326
+ >>> A = spdiags(vals, 0, n, n)
327
+ >>> A = A.astype(np.int16)
328
+ >>> A.toarray()
329
+ array([[ 1, 0, 0, ..., 0, 0, 0],
330
+ [ 0, 2, 0, ..., 0, 0, 0],
331
+ [ 0, 0, 3, ..., 0, 0, 0],
332
+ ...,
333
+ [ 0, 0, 0, ..., 98, 0, 0],
334
+ [ 0, 0, 0, ..., 0, 99, 0],
335
+ [ 0, 0, 0, ..., 0, 0, 100]], dtype=int16)
336
+
337
+ The second mandatory input parameter `X` is a 2D array with the
338
+ row dimension determining the number of requested eigenvalues.
339
+ `X` is an initial guess for targeted eigenvectors.
340
+ `X` must have linearly independent columns.
341
+ If no initial approximations available, randomly oriented vectors
342
+ commonly work best, e.g., with components normally distributed
343
+ around zero or uniformly distributed on the interval [-1 1].
344
+ Setting the initial approximations to dtype ``np.float32``
345
+ forces all iterative values to dtype ``np.float32`` speeding up
346
+ the run while still allowing accurate eigenvalue computations.
347
+
348
+ >>> k = 1
349
+ >>> rng = np.random.default_rng()
350
+ >>> X = rng.normal(size=(n, k))
351
+ >>> X = X.astype(np.float32)
352
+
353
+ >>> eigenvalues, _ = lobpcg(A, X, maxiter=60)
354
+ >>> eigenvalues
355
+ array([100.])
356
+ >>> eigenvalues.dtype
357
+ dtype('float32')
358
+
359
+ `lobpcg` needs only access the matrix product with `A` rather
360
+ than the matrix itself. Since the matrix `A` is diagonal in
361
+ this example, one can write a function of the matrix product
362
+ ``A @ X`` using the diagonal values ``vals`` only, e.g., by
363
+ element-wise multiplication with broadcasting in the lambda-function
364
+
365
+ >>> A_lambda = lambda X: vals[:, np.newaxis] * X
366
+
367
+ or the regular function
368
+
369
+ >>> def A_matmat(X):
370
+ ... return vals[:, np.newaxis] * X
371
+
372
+ and use the handle to one of these callables as an input
373
+
374
+ >>> eigenvalues, _ = lobpcg(A_lambda, X, maxiter=60)
375
+ >>> eigenvalues
376
+ array([100.])
377
+ >>> eigenvalues, _ = lobpcg(A_matmat, X, maxiter=60)
378
+ >>> eigenvalues
379
+ array([100.])
380
+
381
+ The traditional callable `LinearOperator` is no longer
382
+ necessary but still supported as the input to `lobpcg`.
383
+ Specifying ``matmat=A_matmat`` explicitly improves performance.
384
+
385
+ >>> A_lo = LinearOperator((n, n), matvec=A_matmat, matmat=A_matmat, dtype=np.int16)
386
+ >>> eigenvalues, _ = lobpcg(A_lo, X, maxiter=80)
387
+ >>> eigenvalues
388
+ array([100.])
389
+
390
+ The least efficient callable option is `aslinearoperator`:
391
+
392
+ >>> eigenvalues, _ = lobpcg(aslinearoperator(A), X, maxiter=80)
393
+ >>> eigenvalues
394
+ array([100.])
395
+
396
+ We now switch to computing the three smallest eigenvalues specifying
397
+
398
+ >>> k = 3
399
+ >>> X = np.random.default_rng().normal(size=(n, k))
400
+
401
+ and ``largest=False`` parameter
402
+
403
+ >>> eigenvalues, _ = lobpcg(A, X, largest=False, maxiter=80)
404
+ >>> print(eigenvalues)
405
+ [1. 2. 3.]
406
+
407
+ The next example illustrates computing 3 smallest eigenvalues of
408
+ the same matrix `A` given by the function handle ``A_matmat`` but
409
+ with constraints and preconditioning.
410
+
411
+ Constraints - an optional input parameter is a 2D array comprising
412
+ of column vectors that the eigenvectors must be orthogonal to
413
+
414
+ >>> Y = np.eye(n, 3)
415
+
416
+ The preconditioner acts as the inverse of `A` in this example, but
417
+ in the reduced precision ``np.float32`` even though the initial `X`
418
+ and thus all iterates and the output are in full ``np.float64``.
419
+
420
+ >>> inv_vals = 1./vals
421
+ >>> inv_vals = inv_vals.astype(np.float32)
422
+ >>> M = lambda X: inv_vals[:, np.newaxis] * X
423
+
424
+ Let us now solve the eigenvalue problem for the matrix `A` first
425
+ without preconditioning requesting 80 iterations
426
+
427
+ >>> eigenvalues, _ = lobpcg(A_matmat, X, Y=Y, largest=False, maxiter=80)
428
+ >>> eigenvalues
429
+ array([4., 5., 6.])
430
+ >>> eigenvalues.dtype
431
+ dtype('float64')
432
+
433
+ With preconditioning we need only 20 iterations from the same `X`
434
+
435
+ >>> eigenvalues, _ = lobpcg(A_matmat, X, Y=Y, M=M, largest=False, maxiter=20)
436
+ >>> eigenvalues
437
+ array([4., 5., 6.])
438
+
439
+ Note that the vectors passed in `Y` are the eigenvectors of the 3
440
+ smallest eigenvalues. The results returned above are orthogonal to those.
441
+
442
+ The primary matrix `A` may be indefinite, e.g., after shifting
443
+ ``vals`` by 50 from 1, ..., 100 to -49, ..., 50, we still can compute
444
+ the 3 smallest or largest eigenvalues.
445
+
446
+ >>> vals = vals - 50
447
+ >>> X = rng.normal(size=(n, k))
448
+ >>> eigenvalues, _ = lobpcg(A_matmat, X, largest=False, maxiter=99)
449
+ >>> eigenvalues
450
+ array([-49., -48., -47.])
451
+ >>> eigenvalues, _ = lobpcg(A_matmat, X, largest=True, maxiter=99)
452
+ >>> eigenvalues
453
+ array([50., 49., 48.])
454
+
455
+ """
456
+ blockVectorX = X
457
+ bestblockVectorX = blockVectorX
458
+ blockVectorY = Y
459
+ residualTolerance = tol
460
+ if maxiter is None:
461
+ maxiter = 20
462
+
463
+ bestIterationNumber = maxiter
464
+
465
+ sizeY = 0
466
+ if blockVectorY is not None:
467
+ if len(blockVectorY.shape) != 2:
468
+ warnings.warn(
469
+ f"Expected rank-2 array for argument Y, instead got "
470
+ f"{len(blockVectorY.shape)}, "
471
+ f"so ignore it and use no constraints.",
472
+ UserWarning, stacklevel=2
473
+ )
474
+ blockVectorY = None
475
+ else:
476
+ sizeY = blockVectorY.shape[1]
477
+
478
+ # Block size.
479
+ if blockVectorX is None:
480
+ raise ValueError("The mandatory initial matrix X cannot be None")
481
+ if len(blockVectorX.shape) != 2:
482
+ raise ValueError("expected rank-2 array for argument X")
483
+
484
+ n, sizeX = blockVectorX.shape
485
+
486
+ # Data type of iterates, determined by X, must be inexact
487
+ if not np.issubdtype(blockVectorX.dtype, np.inexact):
488
+ warnings.warn(
489
+ f"Data type for argument X is {blockVectorX.dtype}, "
490
+ f"which is not inexact, so casted to np.float32.",
491
+ UserWarning, stacklevel=2
492
+ )
493
+ blockVectorX = np.asarray(blockVectorX, dtype=np.float32)
494
+
495
+ if retLambdaHistory:
496
+ lambdaHistory = np.zeros((maxiter + 3, sizeX),
497
+ dtype=blockVectorX.dtype)
498
+ if retResidualNormsHistory:
499
+ residualNormsHistory = np.zeros((maxiter + 3, sizeX),
500
+ dtype=blockVectorX.dtype)
501
+
502
+ if verbosityLevel:
503
+ aux = "Solving "
504
+ if B is None:
505
+ aux += "standard"
506
+ else:
507
+ aux += "generalized"
508
+ aux += " eigenvalue problem with"
509
+ if M is None:
510
+ aux += "out"
511
+ aux += " preconditioning\n\n"
512
+ aux += "matrix size %d\n" % n
513
+ aux += "block size %d\n\n" % sizeX
514
+ if blockVectorY is None:
515
+ aux += "No constraints\n\n"
516
+ else:
517
+ if sizeY > 1:
518
+ aux += "%d constraints\n\n" % sizeY
519
+ else:
520
+ aux += "%d constraint\n\n" % sizeY
521
+ print(aux)
522
+
523
+ if (n - sizeY) < (5 * sizeX):
524
+ warnings.warn(
525
+ f"The problem size {n} minus the constraints size {sizeY} "
526
+ f"is too small relative to the block size {sizeX}. "
527
+ f"Using a dense eigensolver instead of LOBPCG iterations."
528
+ f"No output of the history of the iterations.",
529
+ UserWarning, stacklevel=2
530
+ )
531
+
532
+ sizeX = min(sizeX, n)
533
+
534
+ if blockVectorY is not None:
535
+ raise NotImplementedError(
536
+ "The dense eigensolver does not support constraints."
537
+ )
538
+
539
+ # Define the closed range of indices of eigenvalues to return.
540
+ if largest:
541
+ eigvals = (n - sizeX, n - 1)
542
+ else:
543
+ eigvals = (0, sizeX - 1)
544
+
545
+ try:
546
+ if isinstance(A, LinearOperator):
547
+ A = A(np.eye(n, dtype=int))
548
+ elif callable(A):
549
+ A = A(np.eye(n, dtype=int))
550
+ if A.shape != (n, n):
551
+ raise ValueError(
552
+ f"The shape {A.shape} of the primary matrix\n"
553
+ f"defined by a callable object is wrong.\n"
554
+ )
555
+ elif issparse(A):
556
+ A = A.toarray()
557
+ else:
558
+ A = np.asarray(A)
559
+ except Exception as e:
560
+ raise Exception(
561
+ f"Primary MatMul call failed with error\n"
562
+ f"{e}\n")
563
+
564
+ if B is not None:
565
+ try:
566
+ if isinstance(B, LinearOperator):
567
+ B = B(np.eye(n, dtype=int))
568
+ elif callable(B):
569
+ B = B(np.eye(n, dtype=int))
570
+ if B.shape != (n, n):
571
+ raise ValueError(
572
+ f"The shape {B.shape} of the secondary matrix\n"
573
+ f"defined by a callable object is wrong.\n"
574
+ )
575
+ elif issparse(B):
576
+ B = B.toarray()
577
+ else:
578
+ B = np.asarray(B)
579
+ except Exception as e:
580
+ raise Exception(
581
+ f"Secondary MatMul call failed with error\n"
582
+ f"{e}\n")
583
+
584
+ try:
585
+ vals, vecs = eigh(A,
586
+ B,
587
+ subset_by_index=eigvals,
588
+ check_finite=False)
589
+ if largest:
590
+ # Reverse order to be compatible with eigs() in 'LM' mode.
591
+ vals = vals[::-1]
592
+ vecs = vecs[:, ::-1]
593
+
594
+ return vals, vecs
595
+ except Exception as e:
596
+ raise Exception(
597
+ f"Dense eigensolver failed with error\n"
598
+ f"{e}\n"
599
+ )
600
+
601
+ if (residualTolerance is None) or (residualTolerance <= 0.0):
602
+ residualTolerance = np.sqrt(np.finfo(blockVectorX.dtype).eps) * n
603
+
604
+ A = _makeMatMat(A)
605
+ B = _makeMatMat(B)
606
+ M = _makeMatMat(M)
607
+
608
+ # Apply constraints to X.
609
+ if blockVectorY is not None:
610
+
611
+ if B is not None:
612
+ blockVectorBY = B(blockVectorY)
613
+ if blockVectorBY.shape != blockVectorY.shape:
614
+ raise ValueError(
615
+ f"The shape {blockVectorY.shape} "
616
+ f"of the constraint not preserved\n"
617
+ f"and changed to {blockVectorBY.shape} "
618
+ f"after multiplying by the secondary matrix.\n"
619
+ )
620
+ else:
621
+ blockVectorBY = blockVectorY
622
+
623
+ # gramYBY is a dense array.
624
+ gramYBY = blockVectorY.T.conj() @ blockVectorBY
625
+ try:
626
+ # gramYBY is a Cholesky factor from now on...
627
+ gramYBY = cho_factor(gramYBY, overwrite_a=True)
628
+ except LinAlgError as e:
629
+ raise ValueError("Linearly dependent constraints") from e
630
+
631
+ _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)
632
+
633
+ ##
634
+ # B-orthonormalize X.
635
+ blockVectorX, blockVectorBX, _ = _b_orthonormalize(
636
+ B, blockVectorX, verbosityLevel=verbosityLevel)
637
+ if blockVectorX is None:
638
+ raise ValueError("Linearly dependent initial approximations")
639
+
640
+ ##
641
+ # Compute the initial Ritz vectors: solve the eigenproblem.
642
+ blockVectorAX = A(blockVectorX)
643
+ if blockVectorAX.shape != blockVectorX.shape:
644
+ raise ValueError(
645
+ f"The shape {blockVectorX.shape} "
646
+ f"of the initial approximations not preserved\n"
647
+ f"and changed to {blockVectorAX.shape} "
648
+ f"after multiplying by the primary matrix.\n"
649
+ )
650
+
651
+ gramXAX = blockVectorX.T.conj() @ blockVectorAX
652
+
653
+ _lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
654
+ ii = _get_indx(_lambda, sizeX, largest)
655
+ _lambda = _lambda[ii]
656
+ if retLambdaHistory:
657
+ lambdaHistory[0, :] = _lambda
658
+
659
+ eigBlockVector = np.asarray(eigBlockVector[:, ii])
660
+ blockVectorX = _matmul_inplace(
661
+ blockVectorX, eigBlockVector,
662
+ verbosityLevel=verbosityLevel
663
+ )
664
+ blockVectorAX = _matmul_inplace(
665
+ blockVectorAX, eigBlockVector,
666
+ verbosityLevel=verbosityLevel
667
+ )
668
+ if B is not None:
669
+ blockVectorBX = _matmul_inplace(
670
+ blockVectorBX, eigBlockVector,
671
+ verbosityLevel=verbosityLevel
672
+ )
673
+
674
+ ##
675
+ # Active index set.
676
+ activeMask = np.ones((sizeX,), dtype=bool)
677
+
678
+ ##
679
+ # Main iteration loop.
680
+
681
+ blockVectorP = None # set during iteration
682
+ blockVectorAP = None
683
+ blockVectorBP = None
684
+
685
+ smallestResidualNorm = np.abs(np.finfo(blockVectorX.dtype).max)
686
+
687
+ iterationNumber = -1
688
+ restart = True
689
+ forcedRestart = False
690
+ explicitGramFlag = False
691
+ while iterationNumber < maxiter:
692
+ iterationNumber += 1
693
+
694
+ if B is not None:
695
+ aux = blockVectorBX * _lambda[np.newaxis, :]
696
+ else:
697
+ aux = blockVectorX * _lambda[np.newaxis, :]
698
+
699
+ blockVectorR = blockVectorAX - aux
700
+
701
+ aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
702
+ residualNorms = np.sqrt(np.abs(aux))
703
+ if retResidualNormsHistory:
704
+ residualNormsHistory[iterationNumber, :] = residualNorms
705
+ residualNorm = np.sum(np.abs(residualNorms)) / sizeX
706
+
707
+ if residualNorm < smallestResidualNorm:
708
+ smallestResidualNorm = residualNorm
709
+ bestIterationNumber = iterationNumber
710
+ bestblockVectorX = blockVectorX
711
+ elif residualNorm > 2**restartControl * smallestResidualNorm:
712
+ forcedRestart = True
713
+ blockVectorAX = A(blockVectorX)
714
+ if blockVectorAX.shape != blockVectorX.shape:
715
+ raise ValueError(
716
+ f"The shape {blockVectorX.shape} "
717
+ f"of the restarted iterate not preserved\n"
718
+ f"and changed to {blockVectorAX.shape} "
719
+ f"after multiplying by the primary matrix.\n"
720
+ )
721
+ if B is not None:
722
+ blockVectorBX = B(blockVectorX)
723
+ if blockVectorBX.shape != blockVectorX.shape:
724
+ raise ValueError(
725
+ f"The shape {blockVectorX.shape} "
726
+ f"of the restarted iterate not preserved\n"
727
+ f"and changed to {blockVectorBX.shape} "
728
+ f"after multiplying by the secondary matrix.\n"
729
+ )
730
+
731
+ ii = np.where(residualNorms > residualTolerance, True, False)
732
+ activeMask = activeMask & ii
733
+ currentBlockSize = activeMask.sum()
734
+
735
+ if verbosityLevel:
736
+ print(f"iteration {iterationNumber}")
737
+ print(f"current block size: {currentBlockSize}")
738
+ print(f"eigenvalue(s):\n{_lambda}")
739
+ print(f"residual norm(s):\n{residualNorms}")
740
+
741
+ if currentBlockSize == 0:
742
+ break
743
+
744
+ activeBlockVectorR = _as2d(blockVectorR[:, activeMask])
745
+
746
+ if iterationNumber > 0:
747
+ activeBlockVectorP = _as2d(blockVectorP[:, activeMask])
748
+ activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask])
749
+ if B is not None:
750
+ activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask])
751
+
752
+ if M is not None:
753
+ # Apply preconditioner T to the active residuals.
754
+ activeBlockVectorR = M(activeBlockVectorR)
755
+
756
+ ##
757
+ # Apply constraints to the preconditioned residuals.
758
+ if blockVectorY is not None:
759
+ _applyConstraints(activeBlockVectorR,
760
+ gramYBY,
761
+ blockVectorBY,
762
+ blockVectorY)
763
+
764
+ ##
765
+ # B-orthogonalize the preconditioned residuals to X.
766
+ if B is not None:
767
+ activeBlockVectorR = activeBlockVectorR - (
768
+ blockVectorX @
769
+ (blockVectorBX.T.conj() @ activeBlockVectorR)
770
+ )
771
+ else:
772
+ activeBlockVectorR = activeBlockVectorR - (
773
+ blockVectorX @
774
+ (blockVectorX.T.conj() @ activeBlockVectorR)
775
+ )
776
+
777
+ ##
778
+ # B-orthonormalize the preconditioned residuals.
779
+ aux = _b_orthonormalize(
780
+ B, activeBlockVectorR, verbosityLevel=verbosityLevel)
781
+ activeBlockVectorR, activeBlockVectorBR, _ = aux
782
+
783
+ if activeBlockVectorR is None:
784
+ warnings.warn(
785
+ f"Failed at iteration {iterationNumber} with accuracies "
786
+ f"{residualNorms}\n not reaching the requested "
787
+ f"tolerance {residualTolerance}.",
788
+ UserWarning, stacklevel=2
789
+ )
790
+ break
791
+ activeBlockVectorAR = A(activeBlockVectorR)
792
+
793
+ if iterationNumber > 0:
794
+ if B is not None:
795
+ aux = _b_orthonormalize(
796
+ B, activeBlockVectorP, activeBlockVectorBP,
797
+ verbosityLevel=verbosityLevel
798
+ )
799
+ activeBlockVectorP, activeBlockVectorBP, invR = aux
800
+ else:
801
+ aux = _b_orthonormalize(B, activeBlockVectorP,
802
+ verbosityLevel=verbosityLevel)
803
+ activeBlockVectorP, _, invR = aux
804
+ # Function _b_orthonormalize returns None if Cholesky fails
805
+ if activeBlockVectorP is not None:
806
+ activeBlockVectorAP = _matmul_inplace(
807
+ activeBlockVectorAP, invR,
808
+ verbosityLevel=verbosityLevel
809
+ )
810
+ restart = forcedRestart
811
+ else:
812
+ restart = True
813
+
814
+ ##
815
+ # Perform the Rayleigh Ritz Procedure:
816
+ # Compute symmetric Gram matrices:
817
+
818
+ if activeBlockVectorAR.dtype == "float32":
819
+ myeps = 1
820
+ else:
821
+ myeps = np.sqrt(np.finfo(activeBlockVectorR.dtype).eps)
822
+
823
+ if residualNorms.max() > myeps and not explicitGramFlag:
824
+ explicitGramFlag = False
825
+ else:
826
+ # Once explicitGramFlag, forever explicitGramFlag.
827
+ explicitGramFlag = True
828
+
829
+ # Shared memory assignments to simplify the code
830
+ if B is None:
831
+ blockVectorBX = blockVectorX
832
+ activeBlockVectorBR = activeBlockVectorR
833
+ if not restart:
834
+ activeBlockVectorBP = activeBlockVectorP
835
+
836
+ # Common submatrices:
837
+ gramXAR = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
838
+ gramRAR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)
839
+
840
+ gramDtype = activeBlockVectorAR.dtype
841
+ if explicitGramFlag:
842
+ gramRAR = (gramRAR + gramRAR.T.conj()) / 2
843
+ gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
844
+ gramXAX = (gramXAX + gramXAX.T.conj()) / 2
845
+ gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
846
+ gramRBR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBR)
847
+ gramXBR = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)
848
+ else:
849
+ gramXAX = np.diag(_lambda).astype(gramDtype)
850
+ gramXBX = np.eye(sizeX, dtype=gramDtype)
851
+ gramRBR = np.eye(currentBlockSize, dtype=gramDtype)
852
+ gramXBR = np.zeros((sizeX, currentBlockSize), dtype=gramDtype)
853
+
854
+ if not restart:
855
+ gramXAP = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
856
+ gramRAP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
857
+ gramPAP = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
858
+ gramXBP = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
859
+ gramRBP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)
860
+ if explicitGramFlag:
861
+ gramPAP = (gramPAP + gramPAP.T.conj()) / 2
862
+ gramPBP = np.dot(activeBlockVectorP.T.conj(),
863
+ activeBlockVectorBP)
864
+ else:
865
+ gramPBP = np.eye(currentBlockSize, dtype=gramDtype)
866
+
867
+ gramA = np.block(
868
+ [
869
+ [gramXAX, gramXAR, gramXAP],
870
+ [gramXAR.T.conj(), gramRAR, gramRAP],
871
+ [gramXAP.T.conj(), gramRAP.T.conj(), gramPAP],
872
+ ]
873
+ )
874
+ gramB = np.block(
875
+ [
876
+ [gramXBX, gramXBR, gramXBP],
877
+ [gramXBR.T.conj(), gramRBR, gramRBP],
878
+ [gramXBP.T.conj(), gramRBP.T.conj(), gramPBP],
879
+ ]
880
+ )
881
+
882
+ _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel)
883
+
884
+ try:
885
+ _lambda, eigBlockVector = eigh(gramA,
886
+ gramB,
887
+ check_finite=False)
888
+ except LinAlgError as e:
889
+ # raise ValueError("eigh failed in lobpcg iterations") from e
890
+ if verbosityLevel:
891
+ warnings.warn(
892
+ f"eigh failed at iteration {iterationNumber} \n"
893
+ f"with error {e} causing a restart.\n",
894
+ UserWarning, stacklevel=2
895
+ )
896
+ # try again after dropping the direction vectors P from RR
897
+ restart = True
898
+
899
+ if restart:
900
+ gramA = np.block([[gramXAX, gramXAR], [gramXAR.T.conj(), gramRAR]])
901
+ gramB = np.block([[gramXBX, gramXBR], [gramXBR.T.conj(), gramRBR]])
902
+
903
+ _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel)
904
+
905
+ try:
906
+ _lambda, eigBlockVector = eigh(gramA,
907
+ gramB,
908
+ check_finite=False)
909
+ except LinAlgError as e:
910
+ # raise ValueError("eigh failed in lobpcg iterations") from e
911
+ warnings.warn(
912
+ f"eigh failed at iteration {iterationNumber} with error\n"
913
+ f"{e}\n",
914
+ UserWarning, stacklevel=2
915
+ )
916
+ break
917
+
918
+ ii = _get_indx(_lambda, sizeX, largest)
919
+ _lambda = _lambda[ii]
920
+ eigBlockVector = eigBlockVector[:, ii]
921
+ if retLambdaHistory:
922
+ lambdaHistory[iterationNumber + 1, :] = _lambda
923
+
924
+ # Compute Ritz vectors.
925
+ if B is not None:
926
+ if not restart:
927
+ eigBlockVectorX = eigBlockVector[:sizeX]
928
+ eigBlockVectorR = eigBlockVector[sizeX:
929
+ sizeX + currentBlockSize]
930
+ eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]
931
+
932
+ pp = np.dot(activeBlockVectorR, eigBlockVectorR)
933
+ pp += np.dot(activeBlockVectorP, eigBlockVectorP)
934
+
935
+ app = np.dot(activeBlockVectorAR, eigBlockVectorR)
936
+ app += np.dot(activeBlockVectorAP, eigBlockVectorP)
937
+
938
+ bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
939
+ bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
940
+ else:
941
+ eigBlockVectorX = eigBlockVector[:sizeX]
942
+ eigBlockVectorR = eigBlockVector[sizeX:]
943
+
944
+ pp = np.dot(activeBlockVectorR, eigBlockVectorR)
945
+ app = np.dot(activeBlockVectorAR, eigBlockVectorR)
946
+ bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
947
+
948
+ blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
949
+ blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
950
+ blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp
951
+
952
+ blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp
953
+
954
+ else:
955
+ if not restart:
956
+ eigBlockVectorX = eigBlockVector[:sizeX]
957
+ eigBlockVectorR = eigBlockVector[sizeX:
958
+ sizeX + currentBlockSize]
959
+ eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]
960
+
961
+ pp = np.dot(activeBlockVectorR, eigBlockVectorR)
962
+ pp += np.dot(activeBlockVectorP, eigBlockVectorP)
963
+
964
+ app = np.dot(activeBlockVectorAR, eigBlockVectorR)
965
+ app += np.dot(activeBlockVectorAP, eigBlockVectorP)
966
+ else:
967
+ eigBlockVectorX = eigBlockVector[:sizeX]
968
+ eigBlockVectorR = eigBlockVector[sizeX:]
969
+
970
+ pp = np.dot(activeBlockVectorR, eigBlockVectorR)
971
+ app = np.dot(activeBlockVectorAR, eigBlockVectorR)
972
+
973
+ blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
974
+ blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
975
+
976
+ blockVectorP, blockVectorAP = pp, app
977
+
978
+ if B is not None:
979
+ aux = blockVectorBX * _lambda[np.newaxis, :]
980
+ else:
981
+ aux = blockVectorX * _lambda[np.newaxis, :]
982
+
983
+ blockVectorR = blockVectorAX - aux
984
+
985
+ aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
986
+ residualNorms = np.sqrt(np.abs(aux))
987
+ # Use old lambda in case of early loop exit.
988
+ if retLambdaHistory:
989
+ lambdaHistory[iterationNumber + 1, :] = _lambda
990
+ if retResidualNormsHistory:
991
+ residualNormsHistory[iterationNumber + 1, :] = residualNorms
992
+ residualNorm = np.sum(np.abs(residualNorms)) / sizeX
993
+ if residualNorm < smallestResidualNorm:
994
+ smallestResidualNorm = residualNorm
995
+ bestIterationNumber = iterationNumber + 1
996
+ bestblockVectorX = blockVectorX
997
+
998
+ if np.max(np.abs(residualNorms)) > residualTolerance:
999
+ warnings.warn(
1000
+ f"Exited at iteration {iterationNumber} with accuracies \n"
1001
+ f"{residualNorms}\n"
1002
+ f"not reaching the requested tolerance {residualTolerance}.\n"
1003
+ f"Use iteration {bestIterationNumber} instead with accuracy \n"
1004
+ f"{smallestResidualNorm}.\n",
1005
+ UserWarning, stacklevel=2
1006
+ )
1007
+
1008
+ if verbosityLevel:
1009
+ print(f"Final iterative eigenvalue(s):\n{_lambda}")
1010
+ print(f"Final iterative residual norm(s):\n{residualNorms}")
1011
+
1012
+ blockVectorX = bestblockVectorX
1013
+ # Making eigenvectors "exactly" satisfy the blockVectorY constraints
1014
+ if blockVectorY is not None:
1015
+ _applyConstraints(blockVectorX,
1016
+ gramYBY,
1017
+ blockVectorBY,
1018
+ blockVectorY)
1019
+
1020
+ # Making eigenvectors "exactly" orthonormalized by final "exact" RR
1021
+ blockVectorAX = A(blockVectorX)
1022
+ if blockVectorAX.shape != blockVectorX.shape:
1023
+ raise ValueError(
1024
+ f"The shape {blockVectorX.shape} "
1025
+ f"of the postprocessing iterate not preserved\n"
1026
+ f"and changed to {blockVectorAX.shape} "
1027
+ f"after multiplying by the primary matrix.\n"
1028
+ )
1029
+ gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
1030
+
1031
+ blockVectorBX = blockVectorX
1032
+ if B is not None:
1033
+ blockVectorBX = B(blockVectorX)
1034
+ if blockVectorBX.shape != blockVectorX.shape:
1035
+ raise ValueError(
1036
+ f"The shape {blockVectorX.shape} "
1037
+ f"of the postprocessing iterate not preserved\n"
1038
+ f"and changed to {blockVectorBX.shape} "
1039
+ f"after multiplying by the secondary matrix.\n"
1040
+ )
1041
+
1042
+ gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
1043
+ _handle_gramA_gramB_verbosity(gramXAX, gramXBX, verbosityLevel)
1044
+ gramXAX = (gramXAX + gramXAX.T.conj()) / 2
1045
+ gramXBX = (gramXBX + gramXBX.T.conj()) / 2
1046
+ try:
1047
+ _lambda, eigBlockVector = eigh(gramXAX,
1048
+ gramXBX,
1049
+ check_finite=False)
1050
+ except LinAlgError as e:
1051
+ raise ValueError("eigh has failed in lobpcg postprocessing") from e
1052
+
1053
+ ii = _get_indx(_lambda, sizeX, largest)
1054
+ _lambda = _lambda[ii]
1055
+ eigBlockVector = np.asarray(eigBlockVector[:, ii])
1056
+
1057
+ blockVectorX = np.dot(blockVectorX, eigBlockVector)
1058
+ blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
1059
+
1060
+ if B is not None:
1061
+ blockVectorBX = np.dot(blockVectorBX, eigBlockVector)
1062
+ aux = blockVectorBX * _lambda[np.newaxis, :]
1063
+ else:
1064
+ aux = blockVectorX * _lambda[np.newaxis, :]
1065
+
1066
+ blockVectorR = blockVectorAX - aux
1067
+
1068
+ aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
1069
+ residualNorms = np.sqrt(np.abs(aux))
1070
+
1071
+ if retLambdaHistory:
1072
+ lambdaHistory[bestIterationNumber + 1, :] = _lambda
1073
+ if retResidualNormsHistory:
1074
+ residualNormsHistory[bestIterationNumber + 1, :] = residualNorms
1075
+
1076
+ if retLambdaHistory:
1077
+ lambdaHistory = lambdaHistory[
1078
+ : bestIterationNumber + 2, :]
1079
+ if retResidualNormsHistory:
1080
+ residualNormsHistory = residualNormsHistory[
1081
+ : bestIterationNumber + 2, :]
1082
+
1083
+ if np.max(np.abs(residualNorms)) > residualTolerance:
1084
+ warnings.warn(
1085
+ f"Exited postprocessing with accuracies \n"
1086
+ f"{residualNorms}\n"
1087
+ f"not reaching the requested tolerance {residualTolerance}.",
1088
+ UserWarning, stacklevel=2
1089
+ )
1090
+
1091
+ if verbosityLevel:
1092
+ print(f"Final postprocessing eigenvalue(s):\n{_lambda}")
1093
+ print(f"Final residual norm(s):\n{residualNorms}")
1094
+
1095
+ if retLambdaHistory:
1096
+ lambdaHistory = np.vsplit(lambdaHistory, np.shape(lambdaHistory)[0])
1097
+ lambdaHistory = [np.squeeze(i) for i in lambdaHistory]
1098
+ if retResidualNormsHistory:
1099
+ residualNormsHistory = np.vsplit(residualNormsHistory,
1100
+ np.shape(residualNormsHistory)[0])
1101
+ residualNormsHistory = [np.squeeze(i) for i in residualNormsHistory]
1102
+
1103
+ if retLambdaHistory:
1104
+ if retResidualNormsHistory:
1105
+ return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
1106
+ else:
1107
+ return _lambda, blockVectorX, lambdaHistory
1108
+ else:
1109
+ if retResidualNormsHistory:
1110
+ return _lambda, blockVectorX, residualNormsHistory
1111
+ else:
1112
+ return _lambda, blockVectorX
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (212 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/test_lobpcg.cpython-310.pyc ADDED
Binary file (19.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py ADDED
@@ -0,0 +1,645 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Test functions for the sparse.linalg._eigen.lobpcg module
2
+ """
3
+
4
+ import itertools
5
+ import platform
6
+ import sys
7
+ import pytest
8
+ import numpy as np
9
+ from numpy import ones, r_, diag
10
+ from numpy.testing import (assert_almost_equal, assert_equal,
11
+ assert_allclose, assert_array_less)
12
+
13
+ from scipy import sparse
14
+ from scipy.linalg import eig, eigh, toeplitz, orth
15
+ from scipy.sparse import spdiags, diags, eye, csr_matrix
16
+ from scipy.sparse.linalg import eigs, LinearOperator
17
+ from scipy.sparse.linalg._eigen.lobpcg import lobpcg
18
+ from scipy.sparse.linalg._eigen.lobpcg.lobpcg import _b_orthonormalize
19
+ from scipy._lib._util import np_long, np_ulong
20
+
21
+ _IS_32BIT = (sys.maxsize < 2**32)
22
+
23
+ INT_DTYPES = {np.intc, np_long, np.longlong, np.uintc, np_ulong, np.ulonglong}
24
+ # np.half is unsupported on many test systems so excluded
25
+ REAL_DTYPES = {np.float32, np.float64, np.longdouble}
26
+ COMPLEX_DTYPES = {np.complex64, np.complex128, np.clongdouble}
27
+ # use sorted list to ensure fixed order of tests
28
+ VDTYPES = sorted(REAL_DTYPES ^ COMPLEX_DTYPES, key=str)
29
+ MDTYPES = sorted(INT_DTYPES ^ REAL_DTYPES ^ COMPLEX_DTYPES, key=str)
30
+
31
+
32
+ def sign_align(A, B):
33
+ """Align signs of columns of A match those of B: column-wise remove
34
+ sign of A by multiplying with its sign then multiply in sign of B.
35
+ """
36
+ return np.array([col_A * np.sign(col_A[0]) * np.sign(col_B[0])
37
+ for col_A, col_B in zip(A.T, B.T)]).T
38
+
39
+ def ElasticRod(n):
40
+ """Build the matrices for the generalized eigenvalue problem of the
41
+ fixed-free elastic rod vibration model.
42
+ """
43
+ L = 1.0
44
+ le = L/n
45
+ rho = 7.85e3
46
+ S = 1.e-4
47
+ E = 2.1e11
48
+ mass = rho*S*le/6.
49
+ k = E*S/le
50
+ A = k*(diag(r_[2.*ones(n-1), 1])-diag(ones(n-1), 1)-diag(ones(n-1), -1))
51
+ B = mass*(diag(r_[4.*ones(n-1), 2])+diag(ones(n-1), 1)+diag(ones(n-1), -1))
52
+ return A, B
53
+
54
+
55
+ def MikotaPair(n):
56
+ """Build a pair of full diagonal matrices for the generalized eigenvalue
57
+ problem. The Mikota pair acts as a nice test since the eigenvalues are the
58
+ squares of the integers n, n=1,2,...
59
+ """
60
+ x = np.arange(1, n+1)
61
+ B = diag(1./x)
62
+ y = np.arange(n-1, 0, -1)
63
+ z = np.arange(2*n-1, 0, -2)
64
+ A = diag(z)-diag(y, -1)-diag(y, 1)
65
+ return A, B
66
+
67
+
68
+ def compare_solutions(A, B, m):
69
+ """Check eig vs. lobpcg consistency.
70
+ """
71
+ n = A.shape[0]
72
+ rnd = np.random.RandomState(0)
73
+ V = rnd.random((n, m))
74
+ X = orth(V)
75
+ eigvals, _ = lobpcg(A, X, B=B, tol=1e-2, maxiter=50, largest=False)
76
+ eigvals.sort()
77
+ w, _ = eig(A, b=B)
78
+ w.sort()
79
+ assert_almost_equal(w[:int(m/2)], eigvals[:int(m/2)], decimal=2)
80
+
81
+
82
+ def test_Small():
83
+ A, B = ElasticRod(10)
84
+ with pytest.warns(UserWarning, match="The problem size"):
85
+ compare_solutions(A, B, 10)
86
+ A, B = MikotaPair(10)
87
+ with pytest.warns(UserWarning, match="The problem size"):
88
+ compare_solutions(A, B, 10)
89
+
90
+
91
+ def test_ElasticRod():
92
+ A, B = ElasticRod(20)
93
+ msg = "Exited at iteration.*|Exited postprocessing with accuracies.*"
94
+ with pytest.warns(UserWarning, match=msg):
95
+ compare_solutions(A, B, 2)
96
+
97
+
98
+ def test_MikotaPair():
99
+ A, B = MikotaPair(20)
100
+ compare_solutions(A, B, 2)
101
+
102
+
103
+ @pytest.mark.parametrize("n", [50])
104
+ @pytest.mark.parametrize("m", [1, 2, 10])
105
+ @pytest.mark.parametrize("Vdtype", sorted(REAL_DTYPES, key=str))
106
+ @pytest.mark.parametrize("Bdtype", sorted(REAL_DTYPES, key=str))
107
+ @pytest.mark.parametrize("BVdtype", sorted(REAL_DTYPES, key=str))
108
+ def test_b_orthonormalize(n, m, Vdtype, Bdtype, BVdtype):
109
+ """Test B-orthonormalization by Cholesky with callable 'B'.
110
+ The function '_b_orthonormalize' is key in LOBPCG but may
111
+ lead to numerical instabilities. The input vectors are often
112
+ badly scaled, so the function needs scale-invariant Cholesky;
113
+ see https://netlib.org/lapack/lawnspdf/lawn14.pdf.
114
+ """
115
+ rnd = np.random.RandomState(0)
116
+ X = rnd.standard_normal((n, m)).astype(Vdtype)
117
+ Xcopy = np.copy(X)
118
+ vals = np.arange(1, n+1, dtype=float)
119
+ B = diags([vals], [0], (n, n)).astype(Bdtype)
120
+ BX = B @ X
121
+ BX = BX.astype(BVdtype)
122
+ dtype = min(X.dtype, B.dtype, BX.dtype)
123
+ # np.longdouble tol cannot be achieved on most systems
124
+ atol = m * n * max(np.finfo(dtype).eps, np.finfo(np.float64).eps)
125
+
126
+ Xo, BXo, _ = _b_orthonormalize(lambda v: B @ v, X, BX)
127
+ # Check in-place.
128
+ assert_equal(X, Xo)
129
+ assert_equal(id(X), id(Xo))
130
+ assert_equal(BX, BXo)
131
+ assert_equal(id(BX), id(BXo))
132
+ # Check BXo.
133
+ assert_allclose(B @ Xo, BXo, atol=atol, rtol=atol)
134
+ # Check B-orthonormality
135
+ assert_allclose(Xo.T.conj() @ B @ Xo, np.identity(m),
136
+ atol=atol, rtol=atol)
137
+ # Repeat without BX in outputs
138
+ X = np.copy(Xcopy)
139
+ Xo1, BXo1, _ = _b_orthonormalize(lambda v: B @ v, X)
140
+ assert_allclose(Xo, Xo1, atol=atol, rtol=atol)
141
+ assert_allclose(BXo, BXo1, atol=atol, rtol=atol)
142
+ # Check in-place.
143
+ assert_equal(X, Xo1)
144
+ assert_equal(id(X), id(Xo1))
145
+ # Check BXo1.
146
+ assert_allclose(B @ Xo1, BXo1, atol=atol, rtol=atol)
147
+
148
+ # Introduce column-scaling in X.
149
+ scaling = 1.0 / np.geomspace(10, 1e10, num=m)
150
+ X = Xcopy * scaling
151
+ X = X.astype(Vdtype)
152
+ BX = B @ X
153
+ BX = BX.astype(BVdtype)
154
+ # Check scaling-invariance of Cholesky-based orthonormalization
155
+ Xo1, BXo1, _ = _b_orthonormalize(lambda v: B @ v, X, BX)
156
+ # The output should be the same, up the signs of the columns.
157
+ Xo1 = sign_align(Xo1, Xo)
158
+ assert_allclose(Xo, Xo1, atol=atol, rtol=atol)
159
+ BXo1 = sign_align(BXo1, BXo)
160
+ assert_allclose(BXo, BXo1, atol=atol, rtol=atol)
161
+
162
+
163
+ @pytest.mark.filterwarnings("ignore:Exited at iteration 0")
164
+ @pytest.mark.filterwarnings("ignore:Exited postprocessing")
165
+ def test_nonhermitian_warning(capsys):
166
+ """Check the warning of a Ritz matrix being not Hermitian
167
+ by feeding a non-Hermitian input matrix.
168
+ Also check stdout since verbosityLevel=1 and lack of stderr.
169
+ """
170
+ n = 10
171
+ X = np.arange(n * 2).reshape(n, 2).astype(np.float32)
172
+ A = np.arange(n * n).reshape(n, n).astype(np.float32)
173
+ with pytest.warns(UserWarning, match="Matrix gramA"):
174
+ _, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0)
175
+ out, err = capsys.readouterr() # Capture output
176
+ assert out.startswith("Solving standard eigenvalue") # Test stdout
177
+ assert err == '' # Test empty stderr
178
+ # Make the matrix symmetric and the UserWarning disappears.
179
+ A += A.T
180
+ _, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0)
181
+ out, err = capsys.readouterr() # Capture output
182
+ assert out.startswith("Solving standard eigenvalue") # Test stdout
183
+ assert err == '' # Test empty stderr
184
+
185
+
186
+ def test_regression():
187
+ """Check the eigenvalue of the identity matrix is one.
188
+ """
189
+ # https://mail.python.org/pipermail/scipy-user/2010-October/026944.html
190
+ n = 10
191
+ X = np.ones((n, 1))
192
+ A = np.identity(n)
193
+ w, _ = lobpcg(A, X)
194
+ assert_allclose(w, [1])
195
+
196
+
197
+ @pytest.mark.filterwarnings("ignore:The problem size")
198
+ @pytest.mark.parametrize('n, m, m_excluded', [(30, 4, 3), (4, 2, 0)])
199
+ def test_diagonal(n, m, m_excluded):
200
+ """Test ``m - m_excluded`` eigenvalues and eigenvectors of
201
+ diagonal matrices of the size ``n`` varying matrix formats:
202
+ dense array, spare matrix, and ``LinearOperator`` for both
203
+ matrixes in the generalized eigenvalue problem ``Av = cBv``
204
+ and for the preconditioner.
205
+ """
206
+ rnd = np.random.RandomState(0)
207
+
208
+ # Define the generalized eigenvalue problem Av = cBv
209
+ # where (c, v) is a generalized eigenpair,
210
+ # A is the diagonal matrix whose entries are 1,...n,
211
+ # B is the identity matrix.
212
+ vals = np.arange(1, n+1, dtype=float)
213
+ A_s = diags([vals], [0], (n, n))
214
+ A_a = A_s.toarray()
215
+
216
+ def A_f(x):
217
+ return A_s @ x
218
+
219
+ A_lo = LinearOperator(matvec=A_f,
220
+ matmat=A_f,
221
+ shape=(n, n), dtype=float)
222
+
223
+ B_a = eye(n)
224
+ B_s = csr_matrix(B_a)
225
+
226
+ def B_f(x):
227
+ return B_a @ x
228
+
229
+ B_lo = LinearOperator(matvec=B_f,
230
+ matmat=B_f,
231
+ shape=(n, n), dtype=float)
232
+
233
+ # Let the preconditioner M be the inverse of A.
234
+ M_s = diags([1./vals], [0], (n, n))
235
+ M_a = M_s.toarray()
236
+
237
+ def M_f(x):
238
+ return M_s @ x
239
+
240
+ M_lo = LinearOperator(matvec=M_f,
241
+ matmat=M_f,
242
+ shape=(n, n), dtype=float)
243
+
244
+ # Pick random initial vectors.
245
+ X = rnd.normal(size=(n, m))
246
+
247
+ # Require that the returned eigenvectors be in the orthogonal complement
248
+ # of the first few standard basis vectors.
249
+ if m_excluded > 0:
250
+ Y = np.eye(n, m_excluded)
251
+ else:
252
+ Y = None
253
+
254
+ for A in [A_a, A_s, A_lo]:
255
+ for B in [B_a, B_s, B_lo]:
256
+ for M in [M_a, M_s, M_lo]:
257
+ eigvals, vecs = lobpcg(A, X, B, M=M, Y=Y,
258
+ maxiter=40, largest=False)
259
+
260
+ assert_allclose(eigvals, np.arange(1+m_excluded,
261
+ 1+m_excluded+m))
262
+ _check_eigen(A, eigvals, vecs, rtol=1e-3, atol=1e-3)
263
+
264
+
265
+ def _check_eigen(M, w, V, rtol=1e-8, atol=1e-14):
266
+ """Check if the eigenvalue residual is small.
267
+ """
268
+ mult_wV = np.multiply(w, V)
269
+ dot_MV = M.dot(V)
270
+ assert_allclose(mult_wV, dot_MV, rtol=rtol, atol=atol)
271
+
272
+
273
+ def _check_fiedler(n, p):
274
+ """Check the Fiedler vector computation.
275
+ """
276
+ # This is not necessarily the recommended way to find the Fiedler vector.
277
+ col = np.zeros(n)
278
+ col[1] = 1
279
+ A = toeplitz(col)
280
+ D = np.diag(A.sum(axis=1))
281
+ L = D - A
282
+ # Compute the full eigendecomposition using tricks, e.g.
283
+ # http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf
284
+ tmp = np.pi * np.arange(n) / n
285
+ analytic_w = 2 * (1 - np.cos(tmp))
286
+ analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp))
287
+ _check_eigen(L, analytic_w, analytic_V)
288
+ # Compute the full eigendecomposition using eigh.
289
+ eigh_w, eigh_V = eigh(L)
290
+ _check_eigen(L, eigh_w, eigh_V)
291
+ # Check that the first eigenvalue is near zero and that the rest agree.
292
+ assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14)
293
+ assert_allclose(eigh_w[1:], analytic_w[1:])
294
+
295
+ # Check small lobpcg eigenvalues.
296
+ X = analytic_V[:, :p]
297
+ lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
298
+ assert_equal(lobpcg_w.shape, (p,))
299
+ assert_equal(lobpcg_V.shape, (n, p))
300
+ _check_eigen(L, lobpcg_w, lobpcg_V)
301
+ assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14)
302
+ assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p])
303
+
304
+ # Check large lobpcg eigenvalues.
305
+ X = analytic_V[:, -p:]
306
+ lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True)
307
+ assert_equal(lobpcg_w.shape, (p,))
308
+ assert_equal(lobpcg_V.shape, (n, p))
309
+ _check_eigen(L, lobpcg_w, lobpcg_V)
310
+ assert_allclose(np.sort(lobpcg_w), analytic_w[-p:])
311
+
312
+ # Look for the Fiedler vector using good but not exactly correct guesses.
313
+ fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2)))
314
+ X = np.vstack((np.ones(n), fiedler_guess)).T
315
+ lobpcg_w, _ = lobpcg(L, X, largest=False)
316
+ # Mathematically, the smaller eigenvalue should be zero
317
+ # and the larger should be the algebraic connectivity.
318
+ lobpcg_w = np.sort(lobpcg_w)
319
+ assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14)
320
+
321
+
322
+ def test_fiedler_small_8():
323
+ """Check the dense workaround path for small matrices.
324
+ """
325
+ # This triggers the dense path because 8 < 2*5.
326
+ with pytest.warns(UserWarning, match="The problem size"):
327
+ _check_fiedler(8, 2)
328
+
329
+
330
+ def test_fiedler_large_12():
331
+ """Check the dense workaround path avoided for non-small matrices.
332
+ """
333
+ # This does not trigger the dense path, because 2*5 <= 12.
334
+ _check_fiedler(12, 2)
335
+
336
+
337
+ @pytest.mark.filterwarnings("ignore:Failed at iteration")
338
+ @pytest.mark.filterwarnings("ignore:Exited at iteration")
339
+ @pytest.mark.filterwarnings("ignore:Exited postprocessing")
340
+ def test_failure_to_run_iterations():
341
+ """Check that the code exits gracefully without breaking. Issue #10974.
342
+ The code may or not issue a warning, filtered out. Issue #15935, #17954.
343
+ """
344
+ rnd = np.random.RandomState(0)
345
+ X = rnd.standard_normal((100, 10))
346
+ A = X @ X.T
347
+ Q = rnd.standard_normal((X.shape[0], 4))
348
+ eigenvalues, _ = lobpcg(A, Q, maxiter=40, tol=1e-12)
349
+ assert np.max(eigenvalues) > 0
350
+
351
+
352
+ def test_failure_to_run_iterations_nonsymmetric():
353
+ """Check that the code exists gracefully without breaking
354
+ if the matrix in not symmetric.
355
+ """
356
+ A = np.zeros((10, 10))
357
+ A[0, 1] = 1
358
+ Q = np.ones((10, 1))
359
+ msg = "Exited at iteration 2|Exited postprocessing with accuracies.*"
360
+ with pytest.warns(UserWarning, match=msg):
361
+ eigenvalues, _ = lobpcg(A, Q, maxiter=20)
362
+ assert np.max(eigenvalues) > 0
363
+
364
+
365
+ @pytest.mark.filterwarnings("ignore:The problem size")
366
+ def test_hermitian():
367
+ """Check complex-value Hermitian cases.
368
+ """
369
+ rnd = np.random.RandomState(0)
370
+
371
+ sizes = [3, 12]
372
+ ks = [1, 2]
373
+ gens = [True, False]
374
+
375
+ for s, k, gen, dh, dx, db in (
376
+ itertools.product(sizes, ks, gens, gens, gens, gens)
377
+ ):
378
+ H = rnd.random((s, s)) + 1.j * rnd.random((s, s))
379
+ H = 10 * np.eye(s) + H + H.T.conj()
380
+ H = H.astype(np.complex128) if dh else H.astype(np.complex64)
381
+
382
+ X = rnd.standard_normal((s, k))
383
+ X = X + 1.j * rnd.standard_normal((s, k))
384
+ X = X.astype(np.complex128) if dx else X.astype(np.complex64)
385
+
386
+ if not gen:
387
+ B = np.eye(s)
388
+ w, v = lobpcg(H, X, maxiter=99, verbosityLevel=0)
389
+ # Also test mixing complex H with real B.
390
+ wb, _ = lobpcg(H, X, B, maxiter=99, verbosityLevel=0)
391
+ assert_allclose(w, wb, rtol=1e-6)
392
+ w0, _ = eigh(H)
393
+ else:
394
+ B = rnd.random((s, s)) + 1.j * rnd.random((s, s))
395
+ B = 10 * np.eye(s) + B.dot(B.T.conj())
396
+ B = B.astype(np.complex128) if db else B.astype(np.complex64)
397
+ w, v = lobpcg(H, X, B, maxiter=99, verbosityLevel=0)
398
+ w0, _ = eigh(H, B)
399
+
400
+ for wx, vx in zip(w, v.T):
401
+ # Check eigenvector
402
+ assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx)
403
+ / np.linalg.norm(H.dot(vx)),
404
+ 0, atol=5e-2, rtol=0)
405
+
406
+ # Compare eigenvalues
407
+ j = np.argmin(abs(w0 - wx))
408
+ assert_allclose(wx, w0[j], rtol=1e-4)
409
+
410
+
411
+ # The n=5 case tests the alternative small matrix code path that uses eigh().
412
+ @pytest.mark.filterwarnings("ignore:The problem size")
413
+ @pytest.mark.parametrize('n, atol', [(20, 1e-3), (5, 1e-8)])
414
+ def test_eigs_consistency(n, atol):
415
+ """Check eigs vs. lobpcg consistency.
416
+ """
417
+ vals = np.arange(1, n+1, dtype=np.float64)
418
+ A = spdiags(vals, 0, n, n)
419
+ rnd = np.random.RandomState(0)
420
+ X = rnd.standard_normal((n, 2))
421
+ lvals, lvecs = lobpcg(A, X, largest=True, maxiter=100)
422
+ vals, _ = eigs(A, k=2)
423
+
424
+ _check_eigen(A, lvals, lvecs, atol=atol, rtol=0)
425
+ assert_allclose(np.sort(vals), np.sort(lvals), atol=1e-14)
426
+
427
+
428
+ def test_verbosity():
429
+ """Check that nonzero verbosity level code runs.
430
+ """
431
+ rnd = np.random.RandomState(0)
432
+ X = rnd.standard_normal((10, 10))
433
+ A = X @ X.T
434
+ Q = rnd.standard_normal((X.shape[0], 1))
435
+ msg = "Exited at iteration.*|Exited postprocessing with accuracies.*"
436
+ with pytest.warns(UserWarning, match=msg):
437
+ _, _ = lobpcg(A, Q, maxiter=3, verbosityLevel=9)
438
+
439
+
440
+ @pytest.mark.xfail(_IS_32BIT and sys.platform == 'win32',
441
+ reason="tolerance violation on windows")
442
+ @pytest.mark.xfail(platform.machine() == 'ppc64le',
443
+ reason="fails on ppc64le")
444
+ @pytest.mark.filterwarnings("ignore:Exited postprocessing")
445
+ def test_tolerance_float32():
446
+ """Check lobpcg for attainable tolerance in float32.
447
+ """
448
+ rnd = np.random.RandomState(0)
449
+ n = 50
450
+ m = 3
451
+ vals = -np.arange(1, n + 1)
452
+ A = diags([vals], [0], (n, n))
453
+ A = A.astype(np.float32)
454
+ X = rnd.standard_normal((n, m))
455
+ X = X.astype(np.float32)
456
+ eigvals, _ = lobpcg(A, X, tol=1.25e-5, maxiter=50, verbosityLevel=0)
457
+ assert_allclose(eigvals, -np.arange(1, 1 + m), atol=2e-5, rtol=1e-5)
458
+
459
+
460
+ @pytest.mark.parametrize("vdtype", VDTYPES)
461
+ @pytest.mark.parametrize("mdtype", MDTYPES)
462
+ @pytest.mark.parametrize("arr_type", [np.array,
463
+ sparse.csr_matrix,
464
+ sparse.coo_matrix])
465
+ def test_dtypes(vdtype, mdtype, arr_type):
466
+ """Test lobpcg in various dtypes.
467
+ """
468
+ rnd = np.random.RandomState(0)
469
+ n = 12
470
+ m = 2
471
+ A = arr_type(np.diag(np.arange(1, n + 1)).astype(mdtype))
472
+ X = rnd.random((n, m))
473
+ X = X.astype(vdtype)
474
+ eigvals, eigvecs = lobpcg(A, X, tol=1e-2, largest=False)
475
+ assert_allclose(eigvals, np.arange(1, 1 + m), atol=1e-1)
476
+ # eigenvectors must be nearly real in any case
477
+ assert_allclose(np.sum(np.abs(eigvecs - eigvecs.conj())), 0, atol=1e-2)
478
+
479
+
480
+ @pytest.mark.filterwarnings("ignore:Exited at iteration")
481
+ @pytest.mark.filterwarnings("ignore:Exited postprocessing")
482
+ def test_inplace_warning():
483
+ """Check lobpcg gives a warning in '_b_orthonormalize'
484
+ that in-place orthogonalization is impossible due to dtype mismatch.
485
+ """
486
+ rnd = np.random.RandomState(0)
487
+ n = 6
488
+ m = 1
489
+ vals = -np.arange(1, n + 1)
490
+ A = diags([vals], [0], (n, n))
491
+ A = A.astype(np.cdouble)
492
+ X = rnd.standard_normal((n, m))
493
+ with pytest.warns(UserWarning, match="Inplace update"):
494
+ eigvals, _ = lobpcg(A, X, maxiter=2, verbosityLevel=1)
495
+
496
+
497
+ def test_maxit():
498
+ """Check lobpcg if maxit=maxiter runs maxiter iterations and
499
+ if maxit=None runs 20 iterations (the default)
500
+ by checking the size of the iteration history output, which should
501
+ be the number of iterations plus 3 (initial, final, and postprocessing)
502
+ typically when maxiter is small and the choice of the best is passive.
503
+ """
504
+ rnd = np.random.RandomState(0)
505
+ n = 50
506
+ m = 4
507
+ vals = -np.arange(1, n + 1)
508
+ A = diags([vals], [0], (n, n))
509
+ A = A.astype(np.float32)
510
+ X = rnd.standard_normal((n, m))
511
+ X = X.astype(np.float64)
512
+ msg = "Exited at iteration.*|Exited postprocessing with accuracies.*"
513
+ for maxiter in range(1, 4):
514
+ with pytest.warns(UserWarning, match=msg):
515
+ _, _, l_h, r_h = lobpcg(A, X, tol=1e-8, maxiter=maxiter,
516
+ retLambdaHistory=True,
517
+ retResidualNormsHistory=True)
518
+ assert_allclose(np.shape(l_h)[0], maxiter+3)
519
+ assert_allclose(np.shape(r_h)[0], maxiter+3)
520
+ with pytest.warns(UserWarning, match=msg):
521
+ l, _, l_h, r_h = lobpcg(A, X, tol=1e-8,
522
+ retLambdaHistory=True,
523
+ retResidualNormsHistory=True)
524
+ assert_allclose(np.shape(l_h)[0], 20+3)
525
+ assert_allclose(np.shape(r_h)[0], 20+3)
526
+ # Check that eigenvalue output is the last one in history
527
+ assert_allclose(l, l_h[-1])
528
+ # Make sure that both history outputs are lists
529
+ assert isinstance(l_h, list)
530
+ assert isinstance(r_h, list)
531
+ # Make sure that both history lists are arrays-like
532
+ assert_allclose(np.shape(l_h), np.shape(np.asarray(l_h)))
533
+ assert_allclose(np.shape(r_h), np.shape(np.asarray(r_h)))
534
+
535
+
536
+ @pytest.mark.slow
537
+ @pytest.mark.parametrize("n", [15])
538
+ @pytest.mark.parametrize("m", [1, 2])
539
+ @pytest.mark.filterwarnings("ignore:Exited at iteration")
540
+ @pytest.mark.filterwarnings("ignore:Exited postprocessing")
541
+ def test_diagonal_data_types(n, m):
542
+ """Check lobpcg for diagonal matrices for all matrix types.
543
+ Constraints are imposed, so a dense eigensolver eig cannot run.
544
+ """
545
+ rnd = np.random.RandomState(0)
546
+ # Define the generalized eigenvalue problem Av = cBv
547
+ # where (c, v) is a generalized eigenpair,
548
+ # and where we choose A and B to be diagonal.
549
+ vals = np.arange(1, n + 1)
550
+
551
+ # list_sparse_format = ['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil']
552
+ list_sparse_format = ['coo']
553
+ sparse_formats = len(list_sparse_format)
554
+ for s_f_i, s_f in enumerate(list_sparse_format):
555
+
556
+ As64 = diags([vals * vals], [0], (n, n), format=s_f)
557
+ As32 = As64.astype(np.float32)
558
+ Af64 = As64.toarray()
559
+ Af32 = Af64.astype(np.float32)
560
+
561
+ def As32f(x):
562
+ return As32 @ x
563
+ As32LO = LinearOperator(matvec=As32f,
564
+ matmat=As32f,
565
+ shape=(n, n),
566
+ dtype=As32.dtype)
567
+
568
+ listA = [Af64, As64, Af32, As32, As32f, As32LO, lambda v: As32 @ v]
569
+
570
+ Bs64 = diags([vals], [0], (n, n), format=s_f)
571
+ Bf64 = Bs64.toarray()
572
+ Bs32 = Bs64.astype(np.float32)
573
+
574
+ def Bs32f(x):
575
+ return Bs32 @ x
576
+ Bs32LO = LinearOperator(matvec=Bs32f,
577
+ matmat=Bs32f,
578
+ shape=(n, n),
579
+ dtype=Bs32.dtype)
580
+ listB = [Bf64, Bs64, Bs32, Bs32f, Bs32LO, lambda v: Bs32 @ v]
581
+
582
+ # Define the preconditioner function as LinearOperator.
583
+ Ms64 = diags([1./vals], [0], (n, n), format=s_f)
584
+
585
+ def Ms64precond(x):
586
+ return Ms64 @ x
587
+ Ms64precondLO = LinearOperator(matvec=Ms64precond,
588
+ matmat=Ms64precond,
589
+ shape=(n, n),
590
+ dtype=Ms64.dtype)
591
+ Mf64 = Ms64.toarray()
592
+
593
+ def Mf64precond(x):
594
+ return Mf64 @ x
595
+ Mf64precondLO = LinearOperator(matvec=Mf64precond,
596
+ matmat=Mf64precond,
597
+ shape=(n, n),
598
+ dtype=Mf64.dtype)
599
+ Ms32 = Ms64.astype(np.float32)
600
+
601
+ def Ms32precond(x):
602
+ return Ms32 @ x
603
+ Ms32precondLO = LinearOperator(matvec=Ms32precond,
604
+ matmat=Ms32precond,
605
+ shape=(n, n),
606
+ dtype=Ms32.dtype)
607
+ Mf32 = Ms32.toarray()
608
+
609
+ def Mf32precond(x):
610
+ return Mf32 @ x
611
+ Mf32precondLO = LinearOperator(matvec=Mf32precond,
612
+ matmat=Mf32precond,
613
+ shape=(n, n),
614
+ dtype=Mf32.dtype)
615
+ listM = [None, Ms64, Ms64precondLO, Mf64precondLO, Ms64precond,
616
+ Ms32, Ms32precondLO, Mf32precondLO, Ms32precond]
617
+
618
+ # Setup matrix of the initial approximation to the eigenvectors
619
+ # (cannot be sparse array).
620
+ Xf64 = rnd.random((n, m))
621
+ Xf32 = Xf64.astype(np.float32)
622
+ listX = [Xf64, Xf32]
623
+
624
+ # Require that the returned eigenvectors be in the orthogonal complement
625
+ # of the first few standard basis vectors (cannot be sparse array).
626
+ m_excluded = 3
627
+ Yf64 = np.eye(n, m_excluded, dtype=float)
628
+ Yf32 = np.eye(n, m_excluded, dtype=np.float32)
629
+ listY = [Yf64, Yf32]
630
+
631
+ tests = list(itertools.product(listA, listB, listM, listX, listY))
632
+ # This is one of the slower tests because there are >1,000 configs
633
+ # to test here, instead of checking product of all input, output types
634
+ # test each configuration for the first sparse format, and then
635
+ # for one additional sparse format. this takes 2/7=30% as long as
636
+ # testing all configurations for all sparse formats.
637
+ if s_f_i > 0:
638
+ tests = tests[s_f_i - 1::sparse_formats-1]
639
+
640
+ for A, B, M, X, Y in tests:
641
+ eigvals, _ = lobpcg(A, X, B=B, M=M, Y=Y, tol=1e-4,
642
+ maxiter=100, largest=False)
643
+ assert_allclose(eigvals,
644
+ np.arange(1 + m_excluded, 1 + m_excluded + m),
645
+ atol=1e-5)
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (205 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__pycache__/test_svds.cpython-310.pyc ADDED
Binary file (24.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/test_svds.py ADDED
@@ -0,0 +1,862 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ import copy
3
+ import numpy as np
4
+
5
+ from numpy.testing import assert_allclose, assert_equal, assert_array_equal
6
+ import pytest
7
+
8
+ from scipy.linalg import svd, null_space
9
+ from scipy.sparse import csc_matrix, issparse, spdiags, random
10
+ from scipy.sparse.linalg import LinearOperator, aslinearoperator
11
+ from scipy.sparse.linalg import svds
12
+ from scipy.sparse.linalg._eigen.arpack import ArpackNoConvergence
13
+
14
+
15
+ # --- Helper Functions / Classes ---
16
+
17
+
18
+ def sorted_svd(m, k, which='LM'):
19
+ # Compute svd of a dense matrix m, and return singular vectors/values
20
+ # sorted.
21
+ if issparse(m):
22
+ m = m.toarray()
23
+ u, s, vh = svd(m)
24
+ if which == 'LM':
25
+ ii = np.argsort(s)[-k:]
26
+ elif which == 'SM':
27
+ ii = np.argsort(s)[:k]
28
+ else:
29
+ raise ValueError(f"unknown which={which!r}")
30
+
31
+ return u[:, ii], s[ii], vh[ii]
32
+
33
+
34
+ def _check_svds(A, k, u, s, vh, which="LM", check_usvh_A=False,
35
+ check_svd=True, atol=1e-10, rtol=1e-7):
36
+ n, m = A.shape
37
+
38
+ # Check shapes.
39
+ assert_equal(u.shape, (n, k))
40
+ assert_equal(s.shape, (k,))
41
+ assert_equal(vh.shape, (k, m))
42
+
43
+ # Check that the original matrix can be reconstituted.
44
+ A_rebuilt = (u*s).dot(vh)
45
+ assert_equal(A_rebuilt.shape, A.shape)
46
+ if check_usvh_A:
47
+ assert_allclose(A_rebuilt, A, atol=atol, rtol=rtol)
48
+
49
+ # Check that u is a semi-orthogonal matrix.
50
+ uh_u = np.dot(u.T.conj(), u)
51
+ assert_equal(uh_u.shape, (k, k))
52
+ assert_allclose(uh_u, np.identity(k), atol=atol, rtol=rtol)
53
+
54
+ # Check that vh is a semi-orthogonal matrix.
55
+ vh_v = np.dot(vh, vh.T.conj())
56
+ assert_equal(vh_v.shape, (k, k))
57
+ assert_allclose(vh_v, np.identity(k), atol=atol, rtol=rtol)
58
+
59
+ # Check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
60
+ if check_svd:
61
+ u2, s2, vh2 = sorted_svd(A, k, which)
62
+ assert_allclose(np.abs(u), np.abs(u2), atol=atol, rtol=rtol)
63
+ assert_allclose(s, s2, atol=atol, rtol=rtol)
64
+ assert_allclose(np.abs(vh), np.abs(vh2), atol=atol, rtol=rtol)
65
+
66
+
67
+ def _check_svds_n(A, k, u, s, vh, which="LM", check_res=True,
68
+ check_svd=True, atol=1e-10, rtol=1e-7):
69
+ n, m = A.shape
70
+
71
+ # Check shapes.
72
+ assert_equal(u.shape, (n, k))
73
+ assert_equal(s.shape, (k,))
74
+ assert_equal(vh.shape, (k, m))
75
+
76
+ # Check that u is a semi-orthogonal matrix.
77
+ uh_u = np.dot(u.T.conj(), u)
78
+ assert_equal(uh_u.shape, (k, k))
79
+ error = np.sum(np.abs(uh_u - np.identity(k))) / (k * k)
80
+ assert_allclose(error, 0.0, atol=atol, rtol=rtol)
81
+
82
+ # Check that vh is a semi-orthogonal matrix.
83
+ vh_v = np.dot(vh, vh.T.conj())
84
+ assert_equal(vh_v.shape, (k, k))
85
+ error = np.sum(np.abs(vh_v - np.identity(k))) / (k * k)
86
+ assert_allclose(error, 0.0, atol=atol, rtol=rtol)
87
+
88
+ # Check residuals
89
+ if check_res:
90
+ ru = A.T.conj() @ u - vh.T.conj() * s
91
+ rus = np.sum(np.abs(ru)) / (n * k)
92
+ rvh = A @ vh.T.conj() - u * s
93
+ rvhs = np.sum(np.abs(rvh)) / (m * k)
94
+ assert_allclose(rus, 0.0, atol=atol, rtol=rtol)
95
+ assert_allclose(rvhs, 0.0, atol=atol, rtol=rtol)
96
+
97
+ # Check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
98
+ if check_svd:
99
+ u2, s2, vh2 = sorted_svd(A, k, which)
100
+ assert_allclose(s, s2, atol=atol, rtol=rtol)
101
+ A_rebuilt_svd = (u2*s2).dot(vh2)
102
+ A_rebuilt = (u*s).dot(vh)
103
+ assert_equal(A_rebuilt.shape, A.shape)
104
+ error = np.sum(np.abs(A_rebuilt_svd - A_rebuilt)) / (k * k)
105
+ assert_allclose(error, 0.0, atol=atol, rtol=rtol)
106
+
107
+
108
+ class CheckingLinearOperator(LinearOperator):
109
+ def __init__(self, A):
110
+ self.A = A
111
+ self.dtype = A.dtype
112
+ self.shape = A.shape
113
+
114
+ def _matvec(self, x):
115
+ assert_equal(max(x.shape), np.size(x))
116
+ return self.A.dot(x)
117
+
118
+ def _rmatvec(self, x):
119
+ assert_equal(max(x.shape), np.size(x))
120
+ return self.A.T.conjugate().dot(x)
121
+
122
+
123
+ # --- Test Input Validation ---
124
+ # Tests input validation on parameters `k` and `which`.
125
+ # Needs better input validation checks for all other parameters.
126
+
127
+ class SVDSCommonTests:
128
+
129
+ solver = None
130
+
131
+ # some of these IV tests could run only once, say with solver=None
132
+
133
+ _A_empty_msg = "`A` must not be empty."
134
+ _A_dtype_msg = "`A` must be of floating or complex floating data type"
135
+ _A_type_msg = "type not understood"
136
+ _A_ndim_msg = "array must have ndim <= 2"
137
+ _A_validation_inputs = [
138
+ (np.asarray([[]]), ValueError, _A_empty_msg),
139
+ (np.asarray([[1, 2], [3, 4]]), ValueError, _A_dtype_msg),
140
+ ("hi", TypeError, _A_type_msg),
141
+ (np.asarray([[[1., 2.], [3., 4.]]]), ValueError, _A_ndim_msg)]
142
+
143
+ @pytest.mark.parametrize("args", _A_validation_inputs)
144
+ def test_svds_input_validation_A(self, args):
145
+ A, error_type, message = args
146
+ with pytest.raises(error_type, match=message):
147
+ svds(A, k=1, solver=self.solver)
148
+
149
+ @pytest.mark.parametrize("k", [-1, 0, 3, 4, 5, 1.5, "1"])
150
+ def test_svds_input_validation_k_1(self, k):
151
+ rng = np.random.default_rng(0)
152
+ A = rng.random((4, 3))
153
+
154
+ # propack can do complete SVD
155
+ if self.solver == 'propack' and k == 3:
156
+ res = svds(A, k=k, solver=self.solver, random_state=0)
157
+ _check_svds(A, k, *res, check_usvh_A=True, check_svd=True)
158
+ return
159
+
160
+ message = ("`k` must be an integer satisfying")
161
+ with pytest.raises(ValueError, match=message):
162
+ svds(A, k=k, solver=self.solver)
163
+
164
+ def test_svds_input_validation_k_2(self):
165
+ # I think the stack trace is reasonable when `k` can't be converted
166
+ # to an int.
167
+ message = "int() argument must be a"
168
+ with pytest.raises(TypeError, match=re.escape(message)):
169
+ svds(np.eye(10), k=[], solver=self.solver)
170
+
171
+ message = "invalid literal for int()"
172
+ with pytest.raises(ValueError, match=message):
173
+ svds(np.eye(10), k="hi", solver=self.solver)
174
+
175
+ @pytest.mark.parametrize("tol", (-1, np.inf, np.nan))
176
+ def test_svds_input_validation_tol_1(self, tol):
177
+ message = "`tol` must be a non-negative floating point value."
178
+ with pytest.raises(ValueError, match=message):
179
+ svds(np.eye(10), tol=tol, solver=self.solver)
180
+
181
+ @pytest.mark.parametrize("tol", ([], 'hi'))
182
+ def test_svds_input_validation_tol_2(self, tol):
183
+ # I think the stack trace is reasonable here
184
+ message = "'<' not supported between instances"
185
+ with pytest.raises(TypeError, match=message):
186
+ svds(np.eye(10), tol=tol, solver=self.solver)
187
+
188
+ @pytest.mark.parametrize("which", ('LA', 'SA', 'ekki', 0))
189
+ def test_svds_input_validation_which(self, which):
190
+ # Regression test for a github issue.
191
+ # https://github.com/scipy/scipy/issues/4590
192
+ # Function was not checking for eigenvalue type and unintended
193
+ # values could be returned.
194
+ with pytest.raises(ValueError, match="`which` must be in"):
195
+ svds(np.eye(10), which=which, solver=self.solver)
196
+
197
+ @pytest.mark.parametrize("transpose", (True, False))
198
+ @pytest.mark.parametrize("n", range(4, 9))
199
+ def test_svds_input_validation_v0_1(self, transpose, n):
200
+ rng = np.random.default_rng(0)
201
+ A = rng.random((5, 7))
202
+ v0 = rng.random(n)
203
+ if transpose:
204
+ A = A.T
205
+ k = 2
206
+ message = "`v0` must have shape"
207
+
208
+ required_length = (A.shape[0] if self.solver == 'propack'
209
+ else min(A.shape))
210
+ if n != required_length:
211
+ with pytest.raises(ValueError, match=message):
212
+ svds(A, k=k, v0=v0, solver=self.solver)
213
+
214
+ def test_svds_input_validation_v0_2(self):
215
+ A = np.ones((10, 10))
216
+ v0 = np.ones((1, 10))
217
+ message = "`v0` must have shape"
218
+ with pytest.raises(ValueError, match=message):
219
+ svds(A, k=1, v0=v0, solver=self.solver)
220
+
221
+ @pytest.mark.parametrize("v0", ("hi", 1, np.ones(10, dtype=int)))
222
+ def test_svds_input_validation_v0_3(self, v0):
223
+ A = np.ones((10, 10))
224
+ message = "`v0` must be of floating or complex floating data type."
225
+ with pytest.raises(ValueError, match=message):
226
+ svds(A, k=1, v0=v0, solver=self.solver)
227
+
228
+ @pytest.mark.parametrize("maxiter", (-1, 0, 5.5))
229
+ def test_svds_input_validation_maxiter_1(self, maxiter):
230
+ message = ("`maxiter` must be a positive integer.")
231
+ with pytest.raises(ValueError, match=message):
232
+ svds(np.eye(10), maxiter=maxiter, solver=self.solver)
233
+
234
+ def test_svds_input_validation_maxiter_2(self):
235
+ # I think the stack trace is reasonable when `k` can't be converted
236
+ # to an int.
237
+ message = "int() argument must be a"
238
+ with pytest.raises(TypeError, match=re.escape(message)):
239
+ svds(np.eye(10), maxiter=[], solver=self.solver)
240
+
241
+ message = "invalid literal for int()"
242
+ with pytest.raises(ValueError, match=message):
243
+ svds(np.eye(10), maxiter="hi", solver=self.solver)
244
+
245
+ @pytest.mark.parametrize("rsv", ('ekki', 10))
246
+ def test_svds_input_validation_return_singular_vectors(self, rsv):
247
+ message = "`return_singular_vectors` must be in"
248
+ with pytest.raises(ValueError, match=message):
249
+ svds(np.eye(10), return_singular_vectors=rsv, solver=self.solver)
250
+
251
+ # --- Test Parameters ---
252
+
253
+ @pytest.mark.parametrize("k", [3, 5])
254
+ @pytest.mark.parametrize("which", ["LM", "SM"])
255
+ def test_svds_parameter_k_which(self, k, which):
256
+ # check that the `k` parameter sets the number of eigenvalues/
257
+ # eigenvectors returned.
258
+ # Also check that the `which` parameter sets whether the largest or
259
+ # smallest eigenvalues are returned
260
+ rng = np.random.default_rng(0)
261
+ A = rng.random((10, 10))
262
+ if self.solver == 'lobpcg':
263
+ with pytest.warns(UserWarning, match="The problem size"):
264
+ res = svds(A, k=k, which=which, solver=self.solver,
265
+ random_state=0)
266
+ else:
267
+ res = svds(A, k=k, which=which, solver=self.solver,
268
+ random_state=0)
269
+ _check_svds(A, k, *res, which=which, atol=8e-10)
270
+
271
+ @pytest.mark.filterwarnings("ignore:Exited",
272
+ reason="Ignore LOBPCG early exit.")
273
+ # loop instead of parametrize for simplicity
274
+ def test_svds_parameter_tol(self):
275
+ # check the effect of the `tol` parameter on solver accuracy by solving
276
+ # the same problem with varying `tol` and comparing the eigenvalues
277
+ # against ground truth computed
278
+ n = 100 # matrix size
279
+ k = 3 # number of eigenvalues to check
280
+
281
+ # generate a random, sparse-ish matrix
282
+ # effect isn't apparent for matrices that are too small
283
+ rng = np.random.default_rng(0)
284
+ A = rng.random((n, n))
285
+ A[A > .1] = 0
286
+ A = A @ A.T
287
+
288
+ _, s, _ = svd(A) # calculate ground truth
289
+
290
+ # calculate the error as a function of `tol`
291
+ A = csc_matrix(A)
292
+
293
+ def err(tol):
294
+ _, s2, _ = svds(A, k=k, v0=np.ones(n), maxiter=1000,
295
+ solver=self.solver, tol=tol, random_state=0)
296
+ return np.linalg.norm((s2 - s[k-1::-1])/s[k-1::-1])
297
+
298
+ tols = [1e-4, 1e-2, 1e0] # tolerance levels to check
299
+ # for 'arpack' and 'propack', accuracies make discrete steps
300
+ accuracies = {'propack': [1e-12, 1e-6, 1e-4],
301
+ 'arpack': [2.5e-15, 1e-10, 1e-10],
302
+ 'lobpcg': [2e-12, 4e-2, 2]}
303
+
304
+ for tol, accuracy in zip(tols, accuracies[self.solver]):
305
+ error = err(tol)
306
+ assert error < accuracy
307
+
308
+ def test_svd_v0(self):
309
+ # check that the `v0` parameter affects the solution
310
+ n = 100
311
+ k = 1
312
+ # If k != 1, LOBPCG needs more initial vectors, which are generated
313
+ # with random_state, so it does not pass w/ k >= 2.
314
+ # For some other values of `n`, the AssertionErrors are not raised
315
+ # with different v0s, which is reasonable.
316
+
317
+ rng = np.random.default_rng(0)
318
+ A = rng.random((n, n))
319
+
320
+ # with the same v0, solutions are the same, and they are accurate
321
+ # v0 takes precedence over random_state
322
+ v0a = rng.random(n)
323
+ res1a = svds(A, k, v0=v0a, solver=self.solver, random_state=0)
324
+ res2a = svds(A, k, v0=v0a, solver=self.solver, random_state=1)
325
+ for idx in range(3):
326
+ assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
327
+ _check_svds(A, k, *res1a)
328
+
329
+ # with the same v0, solutions are the same, and they are accurate
330
+ v0b = rng.random(n)
331
+ res1b = svds(A, k, v0=v0b, solver=self.solver, random_state=2)
332
+ res2b = svds(A, k, v0=v0b, solver=self.solver, random_state=3)
333
+ for idx in range(3):
334
+ assert_allclose(res1b[idx], res2b[idx], rtol=1e-15, atol=2e-16)
335
+ _check_svds(A, k, *res1b)
336
+
337
+ # with different v0, solutions can be numerically different
338
+ message = "Arrays are not equal"
339
+ with pytest.raises(AssertionError, match=message):
340
+ assert_equal(res1a, res1b)
341
+
342
+ def test_svd_random_state(self):
343
+ # check that the `random_state` parameter affects the solution
344
+ # Admittedly, `n` and `k` are chosen so that all solver pass all
345
+ # these checks. That's a tall order, since LOBPCG doesn't want to
346
+ # achieve the desired accuracy and ARPACK often returns the same
347
+ # singular values/vectors for different v0.
348
+ n = 100
349
+ k = 1
350
+
351
+ rng = np.random.default_rng(0)
352
+ A = rng.random((n, n))
353
+
354
+ # with the same random_state, solutions are the same and accurate
355
+ res1a = svds(A, k, solver=self.solver, random_state=0)
356
+ res2a = svds(A, k, solver=self.solver, random_state=0)
357
+ for idx in range(3):
358
+ assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
359
+ _check_svds(A, k, *res1a)
360
+
361
+ # with the same random_state, solutions are the same and accurate
362
+ res1b = svds(A, k, solver=self.solver, random_state=1)
363
+ res2b = svds(A, k, solver=self.solver, random_state=1)
364
+ for idx in range(3):
365
+ assert_allclose(res1b[idx], res2b[idx], rtol=1e-15, atol=2e-16)
366
+ _check_svds(A, k, *res1b)
367
+
368
+ # with different random_state, solutions can be numerically different
369
+ message = "Arrays are not equal"
370
+ with pytest.raises(AssertionError, match=message):
371
+ assert_equal(res1a, res1b)
372
+
373
+ @pytest.mark.parametrize("random_state", (0, 1,
374
+ np.random.RandomState(0),
375
+ np.random.default_rng(0)))
376
+ def test_svd_random_state_2(self, random_state):
377
+ n = 100
378
+ k = 1
379
+
380
+ rng = np.random.default_rng(0)
381
+ A = rng.random((n, n))
382
+
383
+ random_state_2 = copy.deepcopy(random_state)
384
+
385
+ # with the same random_state, solutions are the same and accurate
386
+ res1a = svds(A, k, solver=self.solver, random_state=random_state)
387
+ res2a = svds(A, k, solver=self.solver, random_state=random_state_2)
388
+ for idx in range(3):
389
+ assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
390
+ _check_svds(A, k, *res1a)
391
+
392
+ @pytest.mark.parametrize("random_state", (None,
393
+ np.random.RandomState(0),
394
+ np.random.default_rng(0)))
395
+ @pytest.mark.filterwarnings("ignore:Exited",
396
+ reason="Ignore LOBPCG early exit.")
397
+ def test_svd_random_state_3(self, random_state):
398
+ n = 100
399
+ k = 5
400
+
401
+ rng = np.random.default_rng(0)
402
+ A = rng.random((n, n))
403
+
404
+ random_state = copy.deepcopy(random_state)
405
+
406
+ # random_state in different state produces accurate - but not
407
+ # not necessarily identical - results
408
+ res1a = svds(A, k, solver=self.solver, random_state=random_state, maxiter=1000)
409
+ res2a = svds(A, k, solver=self.solver, random_state=random_state, maxiter=1000)
410
+ _check_svds(A, k, *res1a, atol=2e-7)
411
+ _check_svds(A, k, *res2a, atol=2e-7)
412
+
413
+ message = "Arrays are not equal"
414
+ with pytest.raises(AssertionError, match=message):
415
+ assert_equal(res1a, res2a)
416
+
417
+ @pytest.mark.filterwarnings("ignore:Exited postprocessing")
418
+ def test_svd_maxiter(self):
419
+ # check that maxiter works as expected: should not return accurate
420
+ # solution after 1 iteration, but should with default `maxiter`
421
+ A = np.diag(np.arange(9)).astype(np.float64)
422
+ k = 1
423
+ u, s, vh = sorted_svd(A, k)
424
+ # Use default maxiter by default
425
+ maxiter = None
426
+
427
+ if self.solver == 'arpack':
428
+ message = "ARPACK error -1: No convergence"
429
+ with pytest.raises(ArpackNoConvergence, match=message):
430
+ svds(A, k, ncv=3, maxiter=1, solver=self.solver)
431
+ elif self.solver == 'lobpcg':
432
+ # Set maxiter higher so test passes without changing
433
+ # default and breaking backward compatibility (gh-20221)
434
+ maxiter = 30
435
+ with pytest.warns(UserWarning, match="Exited at iteration"):
436
+ svds(A, k, maxiter=1, solver=self.solver)
437
+ elif self.solver == 'propack':
438
+ message = "k=1 singular triplets did not converge within"
439
+ with pytest.raises(np.linalg.LinAlgError, match=message):
440
+ svds(A, k, maxiter=1, solver=self.solver)
441
+
442
+ ud, sd, vhd = svds(A, k, solver=self.solver, maxiter=maxiter,
443
+ random_state=0)
444
+ _check_svds(A, k, ud, sd, vhd, atol=1e-8)
445
+ assert_allclose(np.abs(ud), np.abs(u), atol=1e-8)
446
+ assert_allclose(np.abs(vhd), np.abs(vh), atol=1e-8)
447
+ assert_allclose(np.abs(sd), np.abs(s), atol=1e-9)
448
+
449
+ @pytest.mark.parametrize("rsv", (True, False, 'u', 'vh'))
450
+ @pytest.mark.parametrize("shape", ((5, 7), (6, 6), (7, 5)))
451
+ def test_svd_return_singular_vectors(self, rsv, shape):
452
+ # check that the return_singular_vectors parameter works as expected
453
+ rng = np.random.default_rng(0)
454
+ A = rng.random(shape)
455
+ k = 2
456
+ M, N = shape
457
+ u, s, vh = sorted_svd(A, k)
458
+
459
+ respect_u = True if self.solver == 'propack' else M <= N
460
+ respect_vh = True if self.solver == 'propack' else M > N
461
+
462
+ if self.solver == 'lobpcg':
463
+ with pytest.warns(UserWarning, match="The problem size"):
464
+ if rsv is False:
465
+ s2 = svds(A, k, return_singular_vectors=rsv,
466
+ solver=self.solver, random_state=rng)
467
+ assert_allclose(s2, s)
468
+ elif rsv == 'u' and respect_u:
469
+ u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
470
+ solver=self.solver, random_state=rng)
471
+ assert_allclose(np.abs(u2), np.abs(u))
472
+ assert_allclose(s2, s)
473
+ assert vh2 is None
474
+ elif rsv == 'vh' and respect_vh:
475
+ u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
476
+ solver=self.solver, random_state=rng)
477
+ assert u2 is None
478
+ assert_allclose(s2, s)
479
+ assert_allclose(np.abs(vh2), np.abs(vh))
480
+ else:
481
+ u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
482
+ solver=self.solver, random_state=rng)
483
+ if u2 is not None:
484
+ assert_allclose(np.abs(u2), np.abs(u))
485
+ assert_allclose(s2, s)
486
+ if vh2 is not None:
487
+ assert_allclose(np.abs(vh2), np.abs(vh))
488
+ else:
489
+ if rsv is False:
490
+ s2 = svds(A, k, return_singular_vectors=rsv,
491
+ solver=self.solver, random_state=rng)
492
+ assert_allclose(s2, s)
493
+ elif rsv == 'u' and respect_u:
494
+ u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
495
+ solver=self.solver, random_state=rng)
496
+ assert_allclose(np.abs(u2), np.abs(u))
497
+ assert_allclose(s2, s)
498
+ assert vh2 is None
499
+ elif rsv == 'vh' and respect_vh:
500
+ u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
501
+ solver=self.solver, random_state=rng)
502
+ assert u2 is None
503
+ assert_allclose(s2, s)
504
+ assert_allclose(np.abs(vh2), np.abs(vh))
505
+ else:
506
+ u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
507
+ solver=self.solver, random_state=rng)
508
+ if u2 is not None:
509
+ assert_allclose(np.abs(u2), np.abs(u))
510
+ assert_allclose(s2, s)
511
+ if vh2 is not None:
512
+ assert_allclose(np.abs(vh2), np.abs(vh))
513
+
514
+ # --- Test Basic Functionality ---
515
+ # Tests the accuracy of each solver for real and complex matrices provided
516
+ # as list, dense array, sparse matrix, and LinearOperator.
517
+
518
+ A1 = [[1, 2, 3], [3, 4, 3], [1 + 1j, 0, 2], [0, 0, 1]]
519
+ A2 = [[1, 2, 3, 8 + 5j], [3 - 2j, 4, 3, 5], [1, 0, 2, 3], [0, 0, 1, 0]]
520
+
521
+ @pytest.mark.filterwarnings("ignore:k >= N - 1",
522
+ reason="needed to demonstrate #16725")
523
+ @pytest.mark.parametrize('A', (A1, A2))
524
+ @pytest.mark.parametrize('k', range(1, 5))
525
+ # PROPACK fails a lot if @pytest.mark.parametrize('which', ("SM", "LM"))
526
+ @pytest.mark.parametrize('real', (True, False))
527
+ @pytest.mark.parametrize('transpose', (False, True))
528
+ # In gh-14299, it was suggested the `svds` should _not_ work with lists
529
+ @pytest.mark.parametrize('lo_type', (np.asarray, csc_matrix,
530
+ aslinearoperator))
531
+ def test_svd_simple(self, A, k, real, transpose, lo_type):
532
+
533
+ A = np.asarray(A)
534
+ A = np.real(A) if real else A
535
+ A = A.T if transpose else A
536
+ A2 = lo_type(A)
537
+
538
+ # could check for the appropriate errors, but that is tested above
539
+ if k > min(A.shape):
540
+ pytest.skip("`k` cannot be greater than `min(A.shape)`")
541
+ if self.solver != 'propack' and k >= min(A.shape):
542
+ pytest.skip("Only PROPACK supports complete SVD")
543
+ if self.solver == 'arpack' and not real and k == min(A.shape) - 1:
544
+ pytest.skip("#16725")
545
+
546
+ atol = 3e-10
547
+ if self.solver == 'propack':
548
+ atol = 3e-9 # otherwise test fails on Linux aarch64 (see gh-19855)
549
+
550
+ if self.solver == 'lobpcg':
551
+ with pytest.warns(UserWarning, match="The problem size"):
552
+ u, s, vh = svds(A2, k, solver=self.solver, random_state=0)
553
+ else:
554
+ u, s, vh = svds(A2, k, solver=self.solver, random_state=0)
555
+ _check_svds(A, k, u, s, vh, atol=atol)
556
+
557
+ def test_svd_linop(self):
558
+ solver = self.solver
559
+
560
+ nmks = [(6, 7, 3),
561
+ (9, 5, 4),
562
+ (10, 8, 5)]
563
+
564
+ def reorder(args):
565
+ U, s, VH = args
566
+ j = np.argsort(s)
567
+ return U[:, j], s[j], VH[j, :]
568
+
569
+ for n, m, k in nmks:
570
+ # Test svds on a LinearOperator.
571
+ A = np.random.RandomState(52).randn(n, m)
572
+ L = CheckingLinearOperator(A)
573
+
574
+ if solver == 'propack':
575
+ v0 = np.ones(n)
576
+ else:
577
+ v0 = np.ones(min(A.shape))
578
+ if solver == 'lobpcg':
579
+ with pytest.warns(UserWarning, match="The problem size"):
580
+ U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver,
581
+ random_state=0))
582
+ U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver,
583
+ random_state=0))
584
+ else:
585
+ U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver,
586
+ random_state=0))
587
+ U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver,
588
+ random_state=0))
589
+
590
+ assert_allclose(np.abs(U1), np.abs(U2))
591
+ assert_allclose(s1, s2)
592
+ assert_allclose(np.abs(VH1), np.abs(VH2))
593
+ assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
594
+ np.dot(U2, np.dot(np.diag(s2), VH2)))
595
+
596
+ # Try again with which="SM".
597
+ A = np.random.RandomState(1909).randn(n, m)
598
+ L = CheckingLinearOperator(A)
599
+
600
+ # TODO: arpack crashes when v0=v0, which="SM"
601
+ kwargs = {'v0': v0} if solver not in {None, 'arpack'} else {}
602
+ if self.solver == 'lobpcg':
603
+ with pytest.warns(UserWarning, match="The problem size"):
604
+ U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver,
605
+ random_state=0, **kwargs))
606
+ U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver,
607
+ random_state=0, **kwargs))
608
+ else:
609
+ U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver,
610
+ random_state=0, **kwargs))
611
+ U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver,
612
+ random_state=0, **kwargs))
613
+
614
+ assert_allclose(np.abs(U1), np.abs(U2))
615
+ assert_allclose(s1 + 1, s2 + 1)
616
+ assert_allclose(np.abs(VH1), np.abs(VH2))
617
+ assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
618
+ np.dot(U2, np.dot(np.diag(s2), VH2)))
619
+
620
+ if k < min(n, m) - 1:
621
+ # Complex input and explicit which="LM".
622
+ for (dt, eps) in [(complex, 1e-7), (np.complex64, 3e-3)]:
623
+ rng = np.random.RandomState(1648)
624
+ A = (rng.randn(n, m) + 1j * rng.randn(n, m)).astype(dt)
625
+ L = CheckingLinearOperator(A)
626
+
627
+ if self.solver == 'lobpcg':
628
+ with pytest.warns(UserWarning,
629
+ match="The problem size"):
630
+ U1, s1, VH1 = reorder(svds(A, k, which="LM",
631
+ solver=solver,
632
+ random_state=0))
633
+ U2, s2, VH2 = reorder(svds(L, k, which="LM",
634
+ solver=solver,
635
+ random_state=0))
636
+ else:
637
+ U1, s1, VH1 = reorder(svds(A, k, which="LM",
638
+ solver=solver,
639
+ random_state=0))
640
+ U2, s2, VH2 = reorder(svds(L, k, which="LM",
641
+ solver=solver,
642
+ random_state=0))
643
+
644
+ assert_allclose(np.abs(U1), np.abs(U2), rtol=eps)
645
+ assert_allclose(s1, s2, rtol=eps)
646
+ assert_allclose(np.abs(VH1), np.abs(VH2), rtol=eps)
647
+ assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
648
+ np.dot(U2, np.dot(np.diag(s2), VH2)),
649
+ rtol=eps)
650
+
651
+ SHAPES = ((100, 100), (100, 101), (101, 100))
652
+
653
+ @pytest.mark.filterwarnings("ignore:Exited at iteration")
654
+ @pytest.mark.filterwarnings("ignore:Exited postprocessing")
655
+ @pytest.mark.parametrize("shape", SHAPES)
656
+ # ARPACK supports only dtype float, complex, or np.float32
657
+ @pytest.mark.parametrize("dtype", (float, complex, np.float32))
658
+ def test_small_sigma_sparse(self, shape, dtype):
659
+ # https://github.com/scipy/scipy/pull/11829
660
+ solver = self.solver
661
+ # 2do: PROPACK fails orthogonality of singular vectors
662
+ # if dtype == complex and self.solver == 'propack':
663
+ # pytest.skip("PROPACK unsupported for complex dtype")
664
+ rng = np.random.default_rng(0)
665
+ k = 5
666
+ (m, n) = shape
667
+ S = random(m, n, density=0.1, random_state=rng)
668
+ if dtype == complex:
669
+ S = + 1j * random(m, n, density=0.1, random_state=rng)
670
+ e = np.ones(m)
671
+ e[0:5] *= 1e1 ** np.arange(-5, 0, 1)
672
+ S = spdiags(e, 0, m, m) @ S
673
+ S = S.astype(dtype)
674
+ u, s, vh = svds(S, k, which='SM', solver=solver, maxiter=1000,
675
+ random_state=0)
676
+ c_svd = False # partial SVD can be different from full SVD
677
+ _check_svds_n(S, k, u, s, vh, which="SM", check_svd=c_svd, atol=2e-1)
678
+
679
+ # --- Test Edge Cases ---
680
+ # Checks a few edge cases.
681
+
682
+ @pytest.mark.parametrize("shape", ((6, 5), (5, 5), (5, 6)))
683
+ @pytest.mark.parametrize("dtype", (float, complex))
684
+ def test_svd_LM_ones_matrix(self, shape, dtype):
685
+ # Check that svds can deal with matrix_rank less than k in LM mode.
686
+ k = 3
687
+ n, m = shape
688
+ A = np.ones((n, m), dtype=dtype)
689
+
690
+ if self.solver == 'lobpcg':
691
+ with pytest.warns(UserWarning, match="The problem size"):
692
+ U, s, VH = svds(A, k, solver=self.solver, random_state=0)
693
+ else:
694
+ U, s, VH = svds(A, k, solver=self.solver, random_state=0)
695
+
696
+ _check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False)
697
+
698
+ # Check that the largest singular value is near sqrt(n*m)
699
+ # and the other singular values have been forced to zero.
700
+ assert_allclose(np.max(s), np.sqrt(n*m))
701
+ s = np.array(sorted(s)[:-1]) + 1
702
+ z = np.ones_like(s)
703
+ assert_allclose(s, z)
704
+
705
+ @pytest.mark.filterwarnings("ignore:k >= N - 1",
706
+ reason="needed to demonstrate #16725")
707
+ @pytest.mark.parametrize("shape", ((3, 4), (4, 4), (4, 3), (4, 2)))
708
+ @pytest.mark.parametrize("dtype", (float, complex))
709
+ def test_zero_matrix(self, shape, dtype):
710
+ # Check that svds can deal with matrices containing only zeros;
711
+ # see https://github.com/scipy/scipy/issues/3452/
712
+ # shape = (4, 2) is included because it is the particular case
713
+ # reported in the issue
714
+ k = 1
715
+ n, m = shape
716
+ A = np.zeros((n, m), dtype=dtype)
717
+
718
+ if (self.solver == 'arpack' and dtype is complex
719
+ and k == min(A.shape) - 1):
720
+ pytest.skip("#16725")
721
+
722
+ if self.solver == 'propack':
723
+ pytest.skip("PROPACK failures unrelated to PR #16712")
724
+
725
+ if self.solver == 'lobpcg':
726
+ with pytest.warns(UserWarning, match="The problem size"):
727
+ U, s, VH = svds(A, k, solver=self.solver, random_state=0)
728
+ else:
729
+ U, s, VH = svds(A, k, solver=self.solver, random_state=0)
730
+
731
+ # Check some generic properties of svd.
732
+ _check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False)
733
+
734
+ # Check that the singular values are zero.
735
+ assert_array_equal(s, 0)
736
+
737
+ @pytest.mark.parametrize("shape", ((20, 20), (20, 21), (21, 20)))
738
+ # ARPACK supports only dtype float, complex, or np.float32
739
+ @pytest.mark.parametrize("dtype", (float, complex, np.float32))
740
+ @pytest.mark.filterwarnings("ignore:Exited",
741
+ reason="Ignore LOBPCG early exit.")
742
+ def test_small_sigma(self, shape, dtype):
743
+ rng = np.random.default_rng(179847540)
744
+ A = rng.random(shape).astype(dtype)
745
+ u, _, vh = svd(A, full_matrices=False)
746
+ if dtype == np.float32:
747
+ e = 10.0
748
+ else:
749
+ e = 100.0
750
+ t = e**(-np.arange(len(vh))).astype(dtype)
751
+ A = (u*t).dot(vh)
752
+ k = 4
753
+ u, s, vh = svds(A, k, solver=self.solver, maxiter=100, random_state=0)
754
+ t = np.sum(s > 0)
755
+ assert_equal(t, k)
756
+ # LOBPCG needs larger atol and rtol to pass
757
+ _check_svds_n(A, k, u, s, vh, atol=1e-3, rtol=1e0, check_svd=False)
758
+
759
+ # ARPACK supports only dtype float, complex, or np.float32
760
+ @pytest.mark.filterwarnings("ignore:The problem size")
761
+ @pytest.mark.parametrize("dtype", (float, complex, np.float32))
762
+ def test_small_sigma2(self, dtype):
763
+ rng = np.random.default_rng(179847540)
764
+ # create a 10x10 singular matrix with a 4-dim null space
765
+ dim = 4
766
+ size = 10
767
+ x = rng.random((size, size-dim))
768
+ y = x[:, :dim] * rng.random(dim)
769
+ mat = np.hstack((x, y))
770
+ mat = mat.astype(dtype)
771
+
772
+ nz = null_space(mat)
773
+ assert_equal(nz.shape[1], dim)
774
+
775
+ # Tolerances atol and rtol adjusted to pass np.float32
776
+ # Use non-sparse svd
777
+ u, s, vh = svd(mat)
778
+ # Singular values are 0:
779
+ assert_allclose(s[-dim:], 0, atol=1e-6, rtol=1e0)
780
+ # Smallest right singular vectors in null space:
781
+ assert_allclose(mat @ vh[-dim:, :].T, 0, atol=1e-6, rtol=1e0)
782
+
783
+ # Smallest singular values should be 0
784
+ sp_mat = csc_matrix(mat)
785
+ su, ss, svh = svds(sp_mat, k=dim, which='SM', solver=self.solver,
786
+ random_state=0)
787
+ # Smallest dim singular values are 0:
788
+ assert_allclose(ss, 0, atol=1e-5, rtol=1e0)
789
+ # Smallest singular vectors via svds in null space:
790
+ n, m = mat.shape
791
+ if n < m: # else the assert fails with some libraries unclear why
792
+ assert_allclose(sp_mat.transpose() @ su, 0, atol=1e-5, rtol=1e0)
793
+ assert_allclose(sp_mat @ svh.T, 0, atol=1e-5, rtol=1e0)
794
+
795
+ # --- Perform tests with each solver ---
796
+
797
+
798
+ class Test_SVDS_once:
799
+ @pytest.mark.parametrize("solver", ['ekki', object])
800
+ def test_svds_input_validation_solver(self, solver):
801
+ message = "solver must be one of"
802
+ with pytest.raises(ValueError, match=message):
803
+ svds(np.ones((3, 4)), k=2, solver=solver)
804
+
805
+
806
+ class Test_SVDS_ARPACK(SVDSCommonTests):
807
+
808
+ def setup_method(self):
809
+ self.solver = 'arpack'
810
+
811
+ @pytest.mark.parametrize("ncv", list(range(-1, 8)) + [4.5, "5"])
812
+ def test_svds_input_validation_ncv_1(self, ncv):
813
+ rng = np.random.default_rng(0)
814
+ A = rng.random((6, 7))
815
+ k = 3
816
+ if ncv in {4, 5}:
817
+ u, s, vh = svds(A, k=k, ncv=ncv, solver=self.solver, random_state=0)
818
+ # partial decomposition, so don't check that u@diag(s)@vh=A;
819
+ # do check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
820
+ _check_svds(A, k, u, s, vh)
821
+ else:
822
+ message = ("`ncv` must be an integer satisfying")
823
+ with pytest.raises(ValueError, match=message):
824
+ svds(A, k=k, ncv=ncv, solver=self.solver)
825
+
826
+ def test_svds_input_validation_ncv_2(self):
827
+ # I think the stack trace is reasonable when `ncv` can't be converted
828
+ # to an int.
829
+ message = "int() argument must be a"
830
+ with pytest.raises(TypeError, match=re.escape(message)):
831
+ svds(np.eye(10), ncv=[], solver=self.solver)
832
+
833
+ message = "invalid literal for int()"
834
+ with pytest.raises(ValueError, match=message):
835
+ svds(np.eye(10), ncv="hi", solver=self.solver)
836
+
837
+ # I can't see a robust relationship between `ncv` and relevant outputs
838
+ # (e.g. accuracy, time), so no test of the parameter.
839
+
840
+
841
+ class Test_SVDS_LOBPCG(SVDSCommonTests):
842
+
843
+ def setup_method(self):
844
+ self.solver = 'lobpcg'
845
+
846
+
847
+ class Test_SVDS_PROPACK(SVDSCommonTests):
848
+
849
+ def setup_method(self):
850
+ self.solver = 'propack'
851
+
852
+ def test_svd_LM_ones_matrix(self):
853
+ message = ("PROPACK does not return orthonormal singular vectors "
854
+ "associated with zero singular values.")
855
+ # There are some other issues with this matrix of all ones, e.g.
856
+ # `which='sm'` and `k=1` returns the largest singular value
857
+ pytest.xfail(message)
858
+
859
+ def test_svd_LM_zeros_matrix(self):
860
+ message = ("PROPACK does not return orthonormal singular vectors "
861
+ "associated with zero singular values.")
862
+ pytest.xfail(message)
llmeval-env/lib/python3.10/site-packages/scipy/spatial/__init__.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =============================================================
3
+ Spatial algorithms and data structures (:mod:`scipy.spatial`)
4
+ =============================================================
5
+
6
+ .. currentmodule:: scipy.spatial
7
+
8
+ .. toctree::
9
+ :hidden:
10
+
11
+ spatial.distance
12
+
13
+ Spatial transformations
14
+ =======================
15
+
16
+ These are contained in the `scipy.spatial.transform` submodule.
17
+
18
+ Nearest-neighbor queries
19
+ ========================
20
+ .. autosummary::
21
+ :toctree: generated/
22
+
23
+ KDTree -- class for efficient nearest-neighbor queries
24
+ cKDTree -- class for efficient nearest-neighbor queries (faster implementation)
25
+ Rectangle
26
+
27
+ Distance metrics
28
+ ================
29
+
30
+ Distance metrics are contained in the :mod:`scipy.spatial.distance` submodule.
31
+
32
+ Delaunay triangulation, convex hulls, and Voronoi diagrams
33
+ ==========================================================
34
+
35
+ .. autosummary::
36
+ :toctree: generated/
37
+
38
+ Delaunay -- compute Delaunay triangulation of input points
39
+ ConvexHull -- compute a convex hull for input points
40
+ Voronoi -- compute a Voronoi diagram hull from input points
41
+ SphericalVoronoi -- compute a Voronoi diagram from input points on the surface of a sphere
42
+ HalfspaceIntersection -- compute the intersection points of input halfspaces
43
+
44
+ Plotting helpers
45
+ ================
46
+
47
+ .. autosummary::
48
+ :toctree: generated/
49
+
50
+ delaunay_plot_2d -- plot 2-D triangulation
51
+ convex_hull_plot_2d -- plot 2-D convex hull
52
+ voronoi_plot_2d -- plot 2-D Voronoi diagram
53
+
54
+ .. seealso:: :ref:`Tutorial <qhulltutorial>`
55
+
56
+
57
+ Simplex representation
58
+ ======================
59
+ The simplices (triangles, tetrahedra, etc.) appearing in the Delaunay
60
+ tessellation (N-D simplices), convex hull facets, and Voronoi ridges
61
+ (N-1-D simplices) are represented in the following scheme::
62
+
63
+ tess = Delaunay(points)
64
+ hull = ConvexHull(points)
65
+ voro = Voronoi(points)
66
+
67
+ # coordinates of the jth vertex of the ith simplex
68
+ tess.points[tess.simplices[i, j], :] # tessellation element
69
+ hull.points[hull.simplices[i, j], :] # convex hull facet
70
+ voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells
71
+
72
+ For Delaunay triangulations and convex hulls, the neighborhood
73
+ structure of the simplices satisfies the condition:
74
+ ``tess.neighbors[i,j]`` is the neighboring simplex of the ith
75
+ simplex, opposite to the ``j``-vertex. It is -1 in case of no neighbor.
76
+
77
+ Convex hull facets also define a hyperplane equation::
78
+
79
+ (hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0
80
+
81
+ Similar hyperplane equations for the Delaunay triangulation correspond
82
+ to the convex hull facets on the corresponding N+1-D
83
+ paraboloid.
84
+
85
+ The Delaunay triangulation objects offer a method for locating the
86
+ simplex containing a given point, and barycentric coordinate
87
+ computations.
88
+
89
+ Functions
90
+ ---------
91
+
92
+ .. autosummary::
93
+ :toctree: generated/
94
+
95
+ tsearch
96
+ distance_matrix
97
+ minkowski_distance
98
+ minkowski_distance_p
99
+ procrustes
100
+ geometric_slerp
101
+
102
+ Warnings / Errors used in :mod:`scipy.spatial`
103
+ ----------------------------------------------
104
+ .. autosummary::
105
+ :toctree: generated/
106
+
107
+ QhullError
108
+ """ # noqa: E501
109
+
110
+ from ._kdtree import *
111
+ from ._ckdtree import *
112
+ from ._qhull import *
113
+ from ._spherical_voronoi import SphericalVoronoi
114
+ from ._plotutils import *
115
+ from ._procrustes import procrustes
116
+ from ._geometric_slerp import geometric_slerp
117
+
118
+ # Deprecated namespaces, to be removed in v2.0.0
119
+ from . import ckdtree, kdtree, qhull
120
+
121
+ __all__ = [s for s in dir() if not s.startswith('_')]
122
+
123
+ from . import distance, transform
124
+
125
+ __all__ += ['distance', 'transform']
126
+
127
+ from scipy._lib._testutils import PytestTester
128
+ test = PytestTester(__name__)
129
+ del PytestTester
llmeval-env/lib/python3.10/site-packages/scipy/spatial/_ckdtree.pyi ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from __future__ import annotations
from typing import (
    Any,
    Generic,
    overload,
    TypeVar,
)

import numpy as np
import numpy.typing as npt
from scipy.sparse import coo_matrix, dok_matrix

from typing import Literal

# Generic parameter of cKDTree: None when no periodic boxsize was given,
# otherwise the per-dimension box lengths.
# TODO: Replace `ndarray` with a 1D float64 array when possible
_BoxType = TypeVar("_BoxType", None, npt.NDArray[np.float64])

# Copied from `numpy.typing._scalar_like._ScalarLike`
# TODO: Expand with 0D arrays once we have shape support
_ArrayLike0D = bool | int | float | complex | str | bytes | np.generic

# Accepted forms of `weights` in `count_neighbors`: presumably either one
# array applied to both trees or a (self_weights, other_weights) pair --
# confirm against the cKDTree.count_neighbors documentation.
_WeightType = npt.ArrayLike | tuple[npt.ArrayLike | None, npt.ArrayLike | None]
23
+
24
class cKDTreeNode:
    """Type stub for one node of the Cython-implemented compressed kd-tree.

    All members are declared as properties because the underlying Cython
    attributes are read-only.
    """
    @property
    def data_points(self) -> npt.NDArray[np.float64]: ...
    @property
    def indices(self) -> npt.NDArray[np.intp]: ...

    # These are read-only attributes in cython, which behave like properties
    @property
    def level(self) -> int: ...
    @property
    def split_dim(self) -> int: ...
    @property
    def children(self) -> int: ...
    @property
    def start_idx(self) -> int: ...
    @property
    def end_idx(self) -> int: ...
    @property
    def split(self) -> float: ...
    # lesser/greater are None when there is no child subtree (presumably
    # for leaf nodes -- confirm in _ckdtree.pyx).
    @property
    def lesser(self) -> cKDTreeNode | None: ...
    @property
    def greater(self) -> cKDTreeNode | None: ...
47
+
48
class cKDTree(Generic[_BoxType]):
    """Type stub for the Cython kd-tree; generic over the ``boxsize`` type
    (``None`` for a non-periodic tree, a float64 array for a periodic one).
    """
    @property
    def n(self) -> int: ...
    @property
    def m(self) -> int: ...
    @property
    def leafsize(self) -> int: ...
    @property
    def size(self) -> int: ...
    @property
    def tree(self) -> cKDTreeNode: ...

    # These are read-only attributes in cython, which behave like properties
    @property
    def data(self) -> npt.NDArray[np.float64]: ...
    @property
    def maxes(self) -> npt.NDArray[np.float64]: ...
    @property
    def mins(self) -> npt.NDArray[np.float64]: ...
    # NOTE(review): float64 looks suspicious for an index array -- the node
    # stub types `indices` as np.intp; confirm against _ckdtree.pyx.
    @property
    def indices(self) -> npt.NDArray[np.float64]: ...
    @property
    def boxsize(self) -> _BoxType: ...

    # NOTE: In practice `__init__` is used as constructor, not `__new__`.
    # The latter gives us more flexibility in setting the generic parameter
    # though.
    @overload
    def __new__(  # type: ignore[misc]
        cls,
        data: npt.ArrayLike,
        leafsize: int = ...,
        compact_nodes: bool = ...,
        copy_data: bool = ...,
        balanced_tree: bool = ...,
        boxsize: None = ...,
    ) -> cKDTree[None]: ...
    @overload
    def __new__(
        cls,
        data: npt.ArrayLike,
        leafsize: int = ...,
        compact_nodes: bool = ...,
        copy_data: bool = ...,
        balanced_tree: bool = ...,
        boxsize: npt.ArrayLike = ...,
    ) -> cKDTree[npt.NDArray[np.float64]]: ...

    # TODO: returns a 2-tuple of scalars if `x.ndim == 1` and `k == 1`,
    # returns a 2-tuple of arrays otherwise
    def query(
        self,
        x: npt.ArrayLike,
        k: npt.ArrayLike = ...,
        eps: float = ...,
        p: float = ...,
        distance_upper_bound: float = ...,
        workers: int | None = ...,
    ) -> tuple[Any, Any]: ...

    # TODO: returns a list scalars if `x.ndim <= 1`,
    # returns an object array of lists otherwise
    def query_ball_point(
        self,
        x: npt.ArrayLike,
        r: npt.ArrayLike,
        p: float,
        eps: float = ...,
        workers: int | None = ...,
        return_sorted: bool | None = ...,
        return_length: bool = ...
    ) -> Any: ...

    def query_ball_tree(
        self,
        other: cKDTree,
        r: float,
        p: float,
        eps: float = ...,
    ) -> list[list[int]]: ...

    # Return type follows `output_type`.
    @overload
    def query_pairs(  # type: ignore[misc]
        self,
        r: float,
        p: float = ...,
        eps: float = ...,
        output_type: Literal["set"] = ...,
    ) -> set[tuple[int, int]]: ...
    @overload
    def query_pairs(
        self,
        r: float,
        p: float = ...,
        eps: float = ...,
        output_type: Literal["ndarray"] = ...,
    ) -> npt.NDArray[np.intp]: ...

    # Return type depends on whether `r` is scalar (scalar result) or
    # array-like (array result), and on whether weights are supplied
    # (int counts vs. float64 weighted counts).
    @overload
    def count_neighbors(  # type: ignore[misc]
        self,
        other: cKDTree,
        r: _ArrayLike0D,
        p: float = ...,
        weights: None | tuple[None, None] = ...,
        cumulative: bool = ...,
    ) -> int: ...
    @overload
    def count_neighbors(  # type: ignore[misc]
        self,
        other: cKDTree,
        r: _ArrayLike0D,
        p: float = ...,
        weights: _WeightType = ...,
        cumulative: bool = ...,
    ) -> np.float64: ...
    @overload
    def count_neighbors(  # type: ignore[misc]
        self,
        other: cKDTree,
        r: npt.ArrayLike,
        p: float = ...,
        weights: None | tuple[None, None] = ...,
        cumulative: bool = ...,
    ) -> npt.NDArray[np.intp]: ...
    @overload
    def count_neighbors(
        self,
        other: cKDTree,
        r: npt.ArrayLike,
        p: float = ...,
        weights: _WeightType = ...,
        cumulative: bool = ...,
    ) -> npt.NDArray[np.float64]: ...

    # Return type follows `output_type`.
    @overload
    def sparse_distance_matrix(  # type: ignore[misc]
        self,
        other: cKDTree,
        max_distance: float,
        p: float = ...,
        output_type: Literal["dok_matrix"] = ...,
    ) -> dok_matrix: ...
    @overload
    def sparse_distance_matrix(  # type: ignore[misc]
        self,
        other: cKDTree,
        max_distance: float,
        p: float = ...,
        output_type: Literal["coo_matrix"] = ...,
    ) -> coo_matrix: ...
    @overload
    def sparse_distance_matrix(  # type: ignore[misc]
        self,
        other: cKDTree,
        max_distance: float,
        p: float = ...,
        output_type: Literal["dict"] = ...,
    ) -> dict[tuple[int, int], float]: ...
    @overload
    def sparse_distance_matrix(
        self,
        other: cKDTree,
        max_distance: float,
        p: float = ...,
        output_type: Literal["ndarray"] = ...,
    ) -> npt.NDArray[np.void]: ...
llmeval-env/lib/python3.10/site-packages/scipy/spatial/_distance_pybind.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (641 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/spatial/_distance_wrap.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (113 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/spatial/_geometric_slerp.py ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ __all__ = ['geometric_slerp']
4
+
5
+ import warnings
6
+ from typing import TYPE_CHECKING
7
+
8
+ import numpy as np
9
+ from scipy.spatial.distance import euclidean
10
+
11
+ if TYPE_CHECKING:
12
+ import numpy.typing as npt
13
+
14
+
15
+ def _geometric_slerp(start, end, t):
16
+ # create an orthogonal basis using QR decomposition
17
+ basis = np.vstack([start, end])
18
+ Q, R = np.linalg.qr(basis.T)
19
+ signs = 2 * (np.diag(R) >= 0) - 1
20
+ Q = Q.T * signs.T[:, np.newaxis]
21
+ R = R.T * signs.T[:, np.newaxis]
22
+
23
+ # calculate the angle between `start` and `end`
24
+ c = np.dot(start, end)
25
+ s = np.linalg.det(R)
26
+ omega = np.arctan2(s, c)
27
+
28
+ # interpolate
29
+ start, end = Q
30
+ s = np.sin(t * omega)
31
+ c = np.cos(t * omega)
32
+ return start * c[:, np.newaxis] + end * s[:, np.newaxis]
33
+
34
+
35
def geometric_slerp(
    start: npt.ArrayLike,
    end: npt.ArrayLike,
    t: npt.ArrayLike,
    tol: float = 1e-7,
) -> np.ndarray:
    """
    Geometric spherical linear interpolation.

    The interpolation occurs along a unit-radius
    great circle arc in arbitrary dimensional space.

    Parameters
    ----------
    start : (n_dimensions, ) array-like
        Single n-dimensional input coordinate in a 1-D array-like
        object. `n` must be greater than 1.
    end : (n_dimensions, ) array-like
        Single n-dimensional input coordinate in a 1-D array-like
        object. `n` must be greater than 1.
    t : float or (n_points,) 1D array-like
        A float or 1D array-like of doubles representing interpolation
        parameters, with values required in the inclusive interval
        between 0 and 1. A common approach is to generate the array
        with ``np.linspace(0, 1, n_pts)`` for linearly spaced points.
        Ascending, descending, and scrambled orders are permitted.
    tol : float
        The absolute tolerance for determining if the start and end
        coordinates are antipodes.

    Returns
    -------
    result : (t.size, D)
        An array of doubles containing the interpolated
        spherical path and including start and
        end when 0 and 1 t are used. The
        interpolated values should correspond to the
        same sort order provided in the t array. The result
        may be 1-dimensional if ``t`` is a float.

    Raises
    ------
    ValueError
        If ``start`` and ``end`` are antipodes, not on the
        unit n-sphere, or for a variety of degenerate conditions.

    See Also
    --------
    scipy.spatial.transform.Slerp : 3-D Slerp that works with quaternions

    Notes
    -----
    The implementation is based on the mathematical formula provided in [1]_,
    and the first known presentation of this algorithm, derived from study of
    4-D geometry, is credited to Glenn Davis in a footnote of the original
    quaternion Slerp publication by Ken Shoemake [2]_.

    .. versionadded:: 1.5.0

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Slerp#Geometric_Slerp
    .. [2] Ken Shoemake (1985) Animating rotation with quaternion curves.
           ACM SIGGRAPH Computer Graphics, 19(3): 245-254.

    Examples
    --------
    Interpolate four linearly-spaced values on the circumference of
    a circle spanning 90 degrees:

    >>> import numpy as np
    >>> from scipy.spatial import geometric_slerp
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> start = np.array([1, 0])
    >>> end = np.array([0, 1])
    >>> t_vals = np.linspace(0, 1, 4)
    >>> result = geometric_slerp(start,
    ...                          end,
    ...                          t_vals)

    The interpolated results should be at 30 degree intervals
    recognizable on the unit circle:

    >>> ax.scatter(result[...,0], result[...,1], c='k')
    >>> circle = plt.Circle((0, 0), 1, color='grey')
    >>> ax.add_artist(circle)
    >>> ax.set_aspect('equal')
    >>> plt.show()

    Attempting to interpolate between antipodes on a circle is
    ambiguous because there are two possible paths, and on a
    sphere there are infinite possible paths on the geodesic surface.
    Nonetheless, one of the ambiguous paths is returned along
    with a warning:

    >>> opposite_pole = np.array([-1, 0])
    >>> with np.testing.suppress_warnings() as sup:
    ...     sup.filter(UserWarning)
    ...     geometric_slerp(start,
    ...                     opposite_pole,
    ...                     t_vals)
    array([[ 1.00000000e+00,  0.00000000e+00],
           [ 5.00000000e-01,  8.66025404e-01],
           [-5.00000000e-01,  8.66025404e-01],
           [-1.00000000e+00,  1.22464680e-16]])

    Extend the original example to a sphere and plot interpolation
    points in 3D:

    >>> from mpl_toolkits.mplot3d import proj3d
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111, projection='3d')

    Plot the unit sphere for reference (optional):

    >>> u = np.linspace(0, 2 * np.pi, 100)
    >>> v = np.linspace(0, np.pi, 100)
    >>> x = np.outer(np.cos(u), np.sin(v))
    >>> y = np.outer(np.sin(u), np.sin(v))
    >>> z = np.outer(np.ones(np.size(u)), np.cos(v))
    >>> ax.plot_surface(x, y, z, color='y', alpha=0.1)

    Interpolating over a larger number of points
    may provide the appearance of a smooth curve on
    the surface of the sphere, which is also useful
    for discretized integration calculations on a
    sphere surface:

    >>> start = np.array([1, 0, 0])
    >>> end = np.array([0, 0, 1])
    >>> t_vals = np.linspace(0, 1, 200)
    >>> result = geometric_slerp(start,
    ...                          end,
    ...                          t_vals)
    >>> ax.plot(result[...,0],
    ...         result[...,1],
    ...         result[...,2],
    ...         c='k')
    >>> plt.show()
    """

    start = np.asarray(start, dtype=np.float64)
    end = np.asarray(end, dtype=np.float64)
    t = np.asarray(t)

    if t.ndim > 1:
        raise ValueError("The interpolation parameter "
                         "value must be one dimensional.")

    if start.ndim != 1 or end.ndim != 1:
        raise ValueError("Start and end coordinates "
                         "must be one-dimensional")

    if start.size != end.size:
        raise ValueError("The dimensions of start and "
                         "end must match (have same size)")

    if start.size < 2 or end.size < 2:
        raise ValueError("The start and end coordinates must "
                         "both be in at least two-dimensional "
                         "space")

    if np.array_equal(start, end):
        # Degenerate path: every interpolant is the shared endpoint.
        # BUGFIX: honor the documented shape contract -- a 1-D result for
        # scalar ``t`` -- instead of always returning (t.size, ndim).
        out = np.linspace(start, start, t.size)
        return out[0] if t.ndim == 0 else out

    # for points that violate equation for n-sphere
    for coord in [start, end]:
        if not np.allclose(np.linalg.norm(coord), 1.0,
                           rtol=1e-9,
                           atol=0):
            raise ValueError("start and end are not"
                             " on a unit n-sphere")

    if not isinstance(tol, float):
        raise ValueError("tol must be a float")
    else:
        tol = np.fabs(tol)

    coord_dist = euclidean(start, end)

    # diameter of 2 within tolerance means antipodes, which is a problem
    # for all unit n-spheres (even the 0-sphere would have an ambiguous path)
    if np.allclose(coord_dist, 2.0, rtol=0, atol=tol):
        warnings.warn("start and end are antipodes "
                      "using the specified tolerance; "
                      "this may cause ambiguous slerp paths",
                      stacklevel=2)

    t = np.asarray(t, dtype=np.float64)

    if t.size == 0:
        return np.empty((0, start.size))

    if t.min() < 0 or t.max() > 1:
        raise ValueError("interpolation parameter must be in [0, 1]")

    if t.ndim == 0:
        # Scalar parameter: interpolate one point and flatten to 1-D.
        return _geometric_slerp(start,
                                end,
                                np.atleast_1d(t)).ravel()
    else:
        return _geometric_slerp(start,
                                end,
                                t)
llmeval-env/lib/python3.10/site-packages/scipy/spatial/_hausdorff.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (250 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/spatial/_kdtree.py ADDED
@@ -0,0 +1,920 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright Anne M. Archibald 2008
2
+ # Released under the scipy license
3
+ import numpy as np
4
+ from ._ckdtree import cKDTree, cKDTreeNode
5
+
6
+ __all__ = ['minkowski_distance_p', 'minkowski_distance',
7
+ 'distance_matrix',
8
+ 'Rectangle', 'KDTree']
9
+
10
+
11
def minkowski_distance_p(x, y, p=2):
    """Compute the pth power of the L**p distance between two arrays.

    For efficiency, this function computes the L**p distance but does
    not extract the pth root. If `p` is 1 or infinity, this is equal to
    the actual L**p distance.

    The last dimensions of `x` and `y` must be the same length. Any
    other dimensions must be compatible for broadcasting.

    Parameters
    ----------
    x : (..., K) array_like
        Input array.
    y : (..., K) array_like
        Input array.
    p : float, 1 <= p <= infinity
        Which Minkowski p-norm to use.

    Returns
    -------
    dist : ndarray
        pth power of the distance between the input arrays.

    Examples
    --------
    >>> from scipy.spatial import minkowski_distance_p
    >>> minkowski_distance_p([[0, 0], [0, 0]], [[1, 1], [0, 1]])
    array([2, 1])

    """
    x = np.asarray(x)
    y = np.asarray(y)

    # Promote both operands to at least float64 -- the documented return
    # type -- while keeping complex input complex (addresses #10262).
    common_dtype = np.promote_types(np.promote_types(x.dtype, y.dtype),
                                    'float64')
    x = x.astype(common_dtype)
    y = y.astype(common_dtype)

    diff = np.abs(y - x)
    if p == np.inf:
        return np.amax(diff, axis=-1)
    if p == 1:
        return np.sum(diff, axis=-1)
    return np.sum(diff**p, axis=-1)
61
+
62
+
63
def minkowski_distance(x, y, p=2):
    """Compute the L**p distance between two arrays.

    The last dimensions of `x` and `y` must be the same length. Any
    other dimensions must be compatible for broadcasting.

    Parameters
    ----------
    x : (..., K) array_like
        Input array.
    y : (..., K) array_like
        Input array.
    p : float, 1 <= p <= infinity
        Which Minkowski p-norm to use.

    Returns
    -------
    dist : ndarray
        Distance between the input arrays.

    Examples
    --------
    >>> from scipy.spatial import minkowski_distance
    >>> minkowski_distance([[0, 0], [0, 0]], [[1, 1], [0, 1]])
    array([ 1.41421356,  1.        ])

    """
    x = np.asarray(x)
    y = np.asarray(y)
    # For p == 1 or p == inf the pth-power distance already equals the
    # true distance; otherwise take the pth root.
    dist_p = minkowski_distance_p(x, y, p)
    if p == 1 or p == np.inf:
        return dist_p
    return dist_p ** (1.0 / p)
96
+
97
+
98
class Rectangle:
    """Hyperrectangle class.

    Represents a Cartesian product of intervals.
    """

    def __init__(self, maxes, mins):
        """Construct a hyperrectangle.

        The per-axis maximum/minimum is taken, so the two corners may be
        passed in either order.
        """
        self.maxes = np.maximum(maxes, mins).astype(float)
        self.mins = np.minimum(maxes, mins).astype(float)
        # Dimensionality of the rectangle.
        self.m, = self.maxes.shape

    def __repr__(self):
        return "<Rectangle %s>" % list(zip(self.mins, self.maxes))

    def volume(self):
        """Total volume."""
        return np.prod(self.maxes - self.mins)

    def split(self, d, split):
        """Produce two hyperrectangles by splitting.

        In general, if you need to compute maximum and minimum
        distances to the children, it can be done more efficiently
        by updating the maximum and minimum distances to the parent.

        Parameters
        ----------
        d : int
            Axis to split hyperrectangle along.
        split : float
            Position along axis `d` to split at.

        """
        upper = np.copy(self.maxes)
        upper[d] = split
        lower = np.copy(self.mins)
        lower[d] = split
        return Rectangle(self.mins, upper), Rectangle(lower, self.maxes)

    def min_distance_point(self, x, p=2.):
        """
        Return the minimum distance between input and points in the
        hyperrectangle.

        Parameters
        ----------
        x : array_like
            Input.
        p : float, optional
            Input.

        """
        # Per-axis gap between x and the rectangle; zero where x lies
        # inside the interval on that axis.
        gap = np.maximum(0, np.maximum(self.mins - x, x - self.maxes))
        return minkowski_distance(0, gap, p)

    def max_distance_point(self, x, p=2.):
        """
        Return the maximum distance between input and points in the hyperrectangle.

        Parameters
        ----------
        x : array_like
            Input array.
        p : float, optional
            Input.

        """
        # Distance to the farthest corner on each axis.
        reach = np.maximum(self.maxes - x, x - self.mins)
        return minkowski_distance(0, reach, p)

    def min_distance_rectangle(self, other, p=2.):
        """
        Compute the minimum distance between points in the two hyperrectangles.

        Parameters
        ----------
        other : hyperrectangle
            Input.
        p : float
            Input.

        """
        gap = np.maximum(0, np.maximum(self.mins - other.maxes,
                                       other.mins - self.maxes))
        return minkowski_distance(0, gap, p)

    def max_distance_rectangle(self, other, p=2.):
        """
        Compute the maximum distance between points in the two hyperrectangles.

        Parameters
        ----------
        other : hyperrectangle
            Input.
        p : float, optional
            Input.

        """
        reach = np.maximum(self.maxes - other.mins, other.maxes - self.mins)
        return minkowski_distance(0, reach, p)
204
+
205
+
206
+ class KDTree(cKDTree):
207
+ """kd-tree for quick nearest-neighbor lookup.
208
+
209
+ This class provides an index into a set of k-dimensional points
210
+ which can be used to rapidly look up the nearest neighbors of any
211
+ point.
212
+
213
+ Parameters
214
+ ----------
215
+ data : array_like, shape (n,m)
216
+ The n data points of dimension m to be indexed. This array is
217
+ not copied unless this is necessary to produce a contiguous
218
+ array of doubles, and so modifying this data will result in
219
+ bogus results. The data are also copied if the kd-tree is built
220
+ with copy_data=True.
221
+ leafsize : positive int, optional
222
+ The number of points at which the algorithm switches over to
223
+ brute-force. Default: 10.
224
+ compact_nodes : bool, optional
225
+ If True, the kd-tree is built to shrink the hyperrectangles to
226
+ the actual data range. This usually gives a more compact tree that
227
+ is robust against degenerated input data and gives faster queries
228
+ at the expense of longer build time. Default: True.
229
+ copy_data : bool, optional
230
+ If True the data is always copied to protect the kd-tree against
231
+ data corruption. Default: False.
232
+ balanced_tree : bool, optional
233
+ If True, the median is used to split the hyperrectangles instead of
234
+ the midpoint. This usually gives a more compact tree and
235
+ faster queries at the expense of longer build time. Default: True.
236
+ boxsize : array_like or scalar, optional
237
+ Apply a m-d toroidal topology to the KDTree.. The topology is generated
238
+ by :math:`x_i + n_i L_i` where :math:`n_i` are integers and :math:`L_i`
239
+ is the boxsize along i-th dimension. The input data shall be wrapped
240
+ into :math:`[0, L_i)`. A ValueError is raised if any of the data is
241
+ outside of this bound.
242
+
243
+ Notes
244
+ -----
245
+ The algorithm used is described in Maneewongvatana and Mount 1999.
246
+ The general idea is that the kd-tree is a binary tree, each of whose
247
+ nodes represents an axis-aligned hyperrectangle. Each node specifies
248
+ an axis and splits the set of points based on whether their coordinate
249
+ along that axis is greater than or less than a particular value.
250
+
251
+ During construction, the axis and splitting point are chosen by the
252
+ "sliding midpoint" rule, which ensures that the cells do not all
253
+ become long and thin.
254
+
255
+ The tree can be queried for the r closest neighbors of any given point
256
+ (optionally returning only those within some maximum distance of the
257
+ point). It can also be queried, with a substantial gain in efficiency,
258
+ for the r approximate closest neighbors.
259
+
260
+ For large dimensions (20 is already large) do not expect this to run
261
+ significantly faster than brute force. High-dimensional nearest-neighbor
262
+ queries are a substantial open problem in computer science.
263
+
264
+ Attributes
265
+ ----------
266
+ data : ndarray, shape (n,m)
267
+ The n data points of dimension m to be indexed. This array is
268
+ not copied unless this is necessary to produce a contiguous
269
+ array of doubles. The data are also copied if the kd-tree is built
270
+ with `copy_data=True`.
271
+ leafsize : positive int
272
+ The number of points at which the algorithm switches over to
273
+ brute-force.
274
+ m : int
275
+ The dimension of a single data-point.
276
+ n : int
277
+ The number of data points.
278
+ maxes : ndarray, shape (m,)
279
+ The maximum value in each dimension of the n data points.
280
+ mins : ndarray, shape (m,)
281
+ The minimum value in each dimension of the n data points.
282
+ size : int
283
+ The number of nodes in the tree.
284
+
285
+ """
286
+
287
    class node:
        """Python-level wrapper around a ``cKDTreeNode``.

        ``_create`` picks the concrete subclass (leaf vs. inner) based on
        the wrapped node's ``split_dim`` sentinel.
        """

        @staticmethod
        def _create(ckdtree_node=None):
            """Create either an inner or leaf node, wrapping a cKDTreeNode instance"""
            if ckdtree_node is None:
                return KDTree.node(ckdtree_node)
            elif ckdtree_node.split_dim == -1:
                # split_dim == -1 is the marker used here for a leaf node.
                return KDTree.leafnode(ckdtree_node)
            else:
                return KDTree.innernode(ckdtree_node)

        def __init__(self, ckdtree_node=None):
            if ckdtree_node is None:
                ckdtree_node = cKDTreeNode()
            # The wrapped Cython node.
            self._node = ckdtree_node

        # All comparisons go by object identity, giving an arbitrary but
        # consistent total order between nodes (presumably for use as
        # tie-breakers in sorted/heap containers -- confirm with callers).
        def __lt__(self, other):
            return id(self) < id(other)

        def __gt__(self, other):
            return id(self) > id(other)

        def __le__(self, other):
            return id(self) <= id(other)

        def __ge__(self, other):
            return id(self) >= id(other)

        def __eq__(self, other):
            return id(self) == id(other)
317
+
318
    class leafnode(node):
        """Leaf of the tree: holds indices of the points stored in it."""

        @property
        def idx(self):
            # Indices (into the tree's data array) of this leaf's points.
            return self._node.indices

        @property
        def children(self):
            # Delegates to the wrapped Cython node's `children` attribute.
            return self._node.children
326
+
327
    class innernode(node):
        """Interior node: splits space along one axis at one position."""

        def __init__(self, ckdtreenode):
            # Eagerly wrap both subtrees, so constructing an innernode
            # materializes the entire Python-level tree below it.
            assert isinstance(ckdtreenode, cKDTreeNode)
            super().__init__(ckdtreenode)
            self.less = KDTree.node._create(ckdtreenode.lesser)
            self.greater = KDTree.node._create(ckdtreenode.greater)

        @property
        def split_dim(self):
            # Axis along which this node partitions its points.
            return self._node.split_dim

        @property
        def split(self):
            # Split position along ``split_dim``.
            return self._node.split

        @property
        def children(self):
            return self._node.children
345
+
346
    @property
    def tree(self):
        """Root of the tree as a ``KDTree.node``, built lazily and cached."""
        if not hasattr(self, "_tree"):
            # Wrap the underlying cKDTreeNode hierarchy once on first access.
            self._tree = KDTree.node._create(super().tree)

        return self._tree
352
+
353
    def __init__(self, data, leafsize=10, compact_nodes=True, copy_data=False,
                 balanced_tree=True, boxsize=None):
        data = np.asarray(data)
        # dtype kind 'c' == complex; explicitly unsupported.
        if data.dtype.kind == 'c':
            raise TypeError("KDTree does not work with complex data")

        # Note KDTree has different default leafsize from cKDTree
        super().__init__(data, leafsize, compact_nodes, copy_data,
                         balanced_tree, boxsize)
362
+
363
    def query(
            self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf, workers=1):
        r"""Query the kd-tree for nearest neighbors.

        Parameters
        ----------
        x : array_like, last dimension self.m
            An array of points to query.
        k : int or Sequence[int], optional
            Either the number of nearest neighbors to return, or a list of the
            k-th nearest neighbors to return, starting from 1.
        eps : nonnegative float, optional
            Return approximate nearest neighbors; the kth returned value
            is guaranteed to be no further than (1+eps) times the
            distance to the real kth nearest neighbor.
        p : float, 1<=p<=infinity, optional
            Which Minkowski p-norm to use.
            1 is the sum-of-absolute-values distance ("Manhattan" distance).
            2 is the usual Euclidean distance.
            infinity is the maximum-coordinate-difference distance.
            A large, finite p may cause a ValueError if overflow can occur.
        distance_upper_bound : nonnegative float, optional
            Return only neighbors within this distance. This is used to prune
            tree searches, so if you are doing a series of nearest-neighbor
            queries, it may help to supply the distance to the nearest neighbor
            of the most recent point.
        workers : int, optional
            Number of workers to use for parallel processing. If -1 is given
            all CPU threads are used. Default: 1.

            .. versionadded:: 1.6.0

        Returns
        -------
        d : float or array of floats
            The distances to the nearest neighbors.
            If ``x`` has shape ``tuple+(self.m,)``, then ``d`` has shape
            ``tuple+(k,)``.
            When k == 1, the last dimension of the output is squeezed.
            Missing neighbors are indicated with infinite distances.
            Hits are sorted by distance (nearest first).

            .. versionchanged:: 1.9.0
               Previously if ``k=None``, then `d` was an object array of
               shape ``tuple``, containing lists of distances. This behavior
               has been removed, use `query_ball_point` instead.

        i : integer or array of integers
            The index of each neighbor in ``self.data``.
            ``i`` is the same shape as d.
            Missing neighbors are indicated with ``self.n``.

        Examples
        --------

        >>> import numpy as np
        >>> from scipy.spatial import KDTree
        >>> x, y = np.mgrid[0:5, 2:8]
        >>> tree = KDTree(np.c_[x.ravel(), y.ravel()])

        To query the nearest neighbours and return squeezed result, use

        >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=1)
        >>> print(dd, ii, sep='\n')
        [2.         0.2236068]
        [ 0 13]

        To query the nearest neighbours and return unsqueezed result, use

        >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=[1])
        >>> print(dd, ii, sep='\n')
        [[2.        ]
         [0.2236068]]
        [[ 0]
         [13]]

        To query the second nearest neighbours and return unsqueezed result,
        use

        >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=[2])
        >>> print(dd, ii, sep='\n')
        [[2.23606798]
         [0.80622577]]
        [[ 6]
         [19]]

        To query the first and second nearest neighbours, use

        >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=2)
        >>> print(dd, ii, sep='\n')
        [[2.         2.23606798]
         [0.2236068  0.80622577]]
        [[ 0  6]
         [13 19]]

        or, be more specific

        >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=[1, 2])
        >>> print(dd, ii, sep='\n')
        [[2.         2.23606798]
         [0.2236068  0.80622577]]
        [[ 0  6]
         [13 19]]

        """
        x = np.asarray(x)
        # dtype kind 'c' == complex; explicitly unsupported.
        if x.dtype.kind == 'c':
            raise TypeError("KDTree does not work with complex data")

        # k=None was removed in 1.9.0 (see versionchanged note above);
        # reject it explicitly with a helpful message.
        if k is None:
            raise ValueError("k must be an integer or a sequence of integers")

        d, i = super().query(x, k, eps, p, distance_upper_bound, workers)
        if isinstance(i, int):
            # Normalize a Python-int scalar result to a NumPy integer for a
            # consistent return type across scalar and array queries.
            i = np.intp(i)
        return d, i
479
+
480
+ def query_ball_point(self, x, r, p=2., eps=0, workers=1,
481
+ return_sorted=None, return_length=False):
482
+ """Find all points within distance r of point(s) x.
483
+
484
+ Parameters
485
+ ----------
486
+ x : array_like, shape tuple + (self.m,)
487
+ The point or points to search for neighbors of.
488
+ r : array_like, float
489
+ The radius of points to return, must broadcast to the length of x.
490
+ p : float, optional
491
+ Which Minkowski p-norm to use. Should be in the range [1, inf].
492
+ A finite large p may cause a ValueError if overflow can occur.
493
+ eps : nonnegative float, optional
494
+ Approximate search. Branches of the tree are not explored if their
495
+ nearest points are further than ``r / (1 + eps)``, and branches are
496
+ added in bulk if their furthest points are nearer than
497
+ ``r * (1 + eps)``.
498
+ workers : int, optional
499
+ Number of jobs to schedule for parallel processing. If -1 is given
500
+ all processors are used. Default: 1.
501
+
502
+ .. versionadded:: 1.6.0
503
+ return_sorted : bool, optional
504
+ Sorts returned indices if True and does not sort them if False. If
505
+ None, does not sort single point queries, but does sort
506
+ multi-point queries which was the behavior before this option
507
+ was added.
508
+
509
+ .. versionadded:: 1.6.0
510
+ return_length : bool, optional
511
+ Return the number of points inside the radius instead of a list
512
+ of the indices.
513
+
514
+ .. versionadded:: 1.6.0
515
+
516
+ Returns
517
+ -------
518
+ results : list or array of lists
519
+ If `x` is a single point, returns a list of the indices of the
520
+ neighbors of `x`. If `x` is an array of points, returns an object
521
+ array of shape tuple containing lists of neighbors.
522
+
523
+ Notes
524
+ -----
525
+ If you have many points whose neighbors you want to find, you may save
526
+ substantial amounts of time by putting them in a KDTree and using
527
+ query_ball_tree.
528
+
529
+ Examples
530
+ --------
531
+ >>> import numpy as np
532
+ >>> from scipy import spatial
533
+ >>> x, y = np.mgrid[0:5, 0:5]
534
+ >>> points = np.c_[x.ravel(), y.ravel()]
535
+ >>> tree = spatial.KDTree(points)
536
+ >>> sorted(tree.query_ball_point([2, 0], 1))
537
+ [5, 10, 11, 15]
538
+
539
+ Query multiple points and plot the results:
540
+
541
+ >>> import matplotlib.pyplot as plt
542
+ >>> points = np.asarray(points)
543
+ >>> plt.plot(points[:,0], points[:,1], '.')
544
+ >>> for results in tree.query_ball_point(([2, 0], [3, 3]), 1):
545
+ ... nearby_points = points[results]
546
+ ... plt.plot(nearby_points[:,0], nearby_points[:,1], 'o')
547
+ >>> plt.margins(0.1, 0.1)
548
+ >>> plt.show()
549
+
550
+ """
551
+ x = np.asarray(x)
552
+ if x.dtype.kind == 'c':
553
+ raise TypeError("KDTree does not work with complex data")
554
+ return super().query_ball_point(
555
+ x, r, p, eps, workers, return_sorted, return_length)
556
+
557
def query_ball_tree(self, other, r, p=2., eps=0):
    """
    Find all pairs of points between `self` and `other` whose distance
    is at most r.

    Parameters
    ----------
    other : KDTree instance
        The tree containing points to search against.
    r : float
        The maximum distance, has to be positive.
    p : float, optional
        Which Minkowski norm to use. `p` has to meet the condition
        ``1 <= p <= infinity``.
    eps : float, optional
        Approximate search. Branches of the tree are not explored if
        their nearest points are further than ``r/(1+eps)``, and
        branches are added in bulk if their furthest points are nearer
        than ``r * (1+eps)``. `eps` has to be non-negative.

    Returns
    -------
    results : list of lists
        For each element ``self.data[i]`` of this tree, ``results[i]``
        is a list of the indices of its neighbors in ``other.data``.

    Examples
    --------
    You can search all pairs of points between two kd-trees within a
    distance:

    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> from scipy.spatial import KDTree
    >>> rng = np.random.default_rng()
    >>> points1 = rng.random((15, 2))
    >>> points2 = rng.random((15, 2))
    >>> plt.figure(figsize=(6, 6))
    >>> plt.plot(points1[:, 0], points1[:, 1], "xk", markersize=14)
    >>> plt.plot(points2[:, 0], points2[:, 1], "og", markersize=14)
    >>> kd_tree1 = KDTree(points1)
    >>> kd_tree2 = KDTree(points2)
    >>> indexes = kd_tree1.query_ball_tree(kd_tree2, r=0.2)
    >>> for i in range(len(indexes)):
    ...     for j in indexes[i]:
    ...         plt.plot([points1[i, 0], points2[j, 0]],
    ...                  [points1[i, 1], points2[j, 1]], "-r")
    >>> plt.show()

    """
    # All validation and the actual traversal live in the C base class.
    return super().query_ball_tree(other, r, p, eps)
607
+
608
def query_pairs(self, r, p=2., eps=0, output_type='set'):
    """Find all pairs of points in `self` whose distance is at most r.

    Parameters
    ----------
    r : positive float
        The maximum distance.
    p : float, optional
        Which Minkowski norm to use. `p` has to meet the condition
        ``1 <= p <= infinity``.
    eps : float, optional
        Approximate search. Branches of the tree are not explored if
        their nearest points are further than ``r/(1+eps)``, and
        branches are added in bulk if their furthest points are nearer
        than ``r * (1+eps)``. `eps` has to be non-negative.
    output_type : string, optional
        Choose the output container, 'set' or 'ndarray'. Default: 'set'

        .. versionadded:: 1.6.0

    Returns
    -------
    results : set or ndarray
        Set of pairs ``(i,j)``, with ``i < j``, for which the
        corresponding positions are close. If output_type is 'ndarray',
        an ndarray is returned instead of a set.

    Examples
    --------
    You can search all pairs of points in a kd-tree within a distance:

    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> from scipy.spatial import KDTree
    >>> rng = np.random.default_rng()
    >>> points = rng.random((20, 2))
    >>> plt.figure(figsize=(6, 6))
    >>> plt.plot(points[:, 0], points[:, 1], "xk", markersize=14)
    >>> kd_tree = KDTree(points)
    >>> pairs = kd_tree.query_pairs(r=0.2)
    >>> for (i, j) in pairs:
    ...     plt.plot([points[i, 0], points[j, 0]],
    ...              [points[i, 1], points[j, 1]], "-r")
    >>> plt.show()

    """
    # Pure delegation; the base class validates arguments and builds
    # the requested output container.
    return super().query_pairs(r, p, eps, output_type)
655
+
656
def count_neighbors(self, other, r, p=2., weights=None, cumulative=True):
    """Count how many nearby pairs can be formed.

    Count the number of pairs ``(x1,x2)`` that can be formed, with
    ``x1`` drawn from ``self`` and ``x2`` drawn from ``other``, and
    where ``distance(x1, x2, p) <= r``.

    Data points on ``self`` and ``other`` are optionally weighted by
    the ``weights`` argument. (See below.)

    This is adapted from the "two-point correlation" algorithm
    described by Gray and Moore [1]_. See notes for further discussion.

    Parameters
    ----------
    other : KDTree
        The other tree to draw points from, can be the same tree as
        self.
    r : float or one-dimensional array of floats
        The radius to produce a count for. Multiple radii are searched
        with a single tree traversal.
        If the count is non-cumulative (``cumulative=False``), ``r``
        defines the edges of the bins, and must be non-decreasing.
    p : float, optional
        1<=p<=infinity.
        Which Minkowski p-norm to use.
        Default 2.0.
        A finite large p may cause a ValueError if overflow can occur.
    weights : tuple, array_like, or None, optional
        If None, the pair-counting is unweighted.
        If given as a tuple, weights[0] is the weights of points in
        ``self``, and weights[1] is the weights of points in ``other``;
        either can be None to indicate the points are unweighted.
        If given as an array_like, weights is the weights of points in
        ``self`` and ``other``. For this to make sense, ``self`` and
        ``other`` must be the same tree. If ``self`` and ``other`` are
        two different trees, a ``ValueError`` is raised.
        Default: None

        .. versionadded:: 1.6.0
    cumulative : bool, optional
        Whether the returned counts are cumulative. When cumulative is
        set to ``False`` the algorithm is optimized to work with a
        large number of bins (>10) specified by ``r``. When
        ``cumulative`` is set to True, the algorithm is optimized to
        work with a small number of ``r``. Default: True

        .. versionadded:: 1.6.0

    Returns
    -------
    result : scalar or 1-D array
        The number of pairs. For unweighted counts, the result is
        integer. For weighted counts, the result is float.
        If cumulative is False, ``result[i]`` contains the counts with
        ``(-inf if i == 0 else r[i-1]) < R <= r[i]``

    Notes
    -----
    Pair-counting is the basic operation used to calculate the two
    point correlation functions from a data set composed of position
    of objects.

    Two point correlation function measures the clustering of objects
    and is widely used in cosmology to quantify the large scale
    structure in our Universe, but it may be useful for data analysis
    in other fields where self-similar assembly of objects also occur.

    The Landy-Szalay estimator for the two point correlation function
    of ``D`` measures the clustering signal in ``D``. [2]_

    For example, given the position of two sets of objects,

    - objects ``D`` (data) contains the clustering signal, and

    - objects ``R`` (random) that contains no signal,

    .. math::

        \\xi(r) = \\frac{<D, D> - 2 f <D, R> + f^2<R, R>}{f^2<R, R>},

    where the brackets represents counting pairs between two data sets
    in a finite bin around ``r`` (distance), corresponding to setting
    `cumulative=False`, and ``f = float(len(D)) / float(len(R))`` is
    the ratio between number of objects from data and random.

    The algorithm implemented here is loosely based on the dual-tree
    algorithm described in [1]_. We switch between two different
    pair-cumulation scheme depending on the setting of ``cumulative``.
    The computing time of the method we use when for
    ``cumulative == False`` does not scale with the total number of
    bins. The algorithm for ``cumulative == True`` scales linearly
    with the number of bins, though it is slightly faster when only
    1 or 2 bins are used. [5]_.

    As an extension to the naive pair-counting, weighted pair-counting
    counts the product of weights instead of number of pairs. Weighted
    pair-counting is used to estimate marked correlation functions
    ([3]_, section 2.2), or to properly calculate the average of data
    per distance bin (e.g. [4]_, section 2.1 on redshift).

    .. [1] Gray and Moore,
           "N-body problems in statistical learning",
           Mining the sky, 2000, https://arxiv.org/abs/astro-ph/0012333

    .. [2] Landy and Szalay,
           "Bias and variance of angular correlation functions",
           The Astrophysical Journal, 1993,
           http://adsabs.harvard.edu/abs/1993ApJ...412...64L

    .. [3] Sheth, Connolly and Skibba,
           "Marked correlations in galaxy formation models",
           Arxiv e-print, 2005, https://arxiv.org/abs/astro-ph/0511773

    .. [4] Hawkins, et al.,
           "The 2dF Galaxy Redshift Survey: correlation functions,
           peculiar velocities and the matter density of the Universe",
           Monthly Notices of the Royal Astronomical Society, 2002,
           http://adsabs.harvard.edu/abs/2003MNRAS.346...78H

    .. [5] https://github.com/scipy/scipy/pull/5647#issuecomment-168474926

    Examples
    --------
    You can count neighbors number between two kd-trees within a
    distance:

    >>> import numpy as np
    >>> from scipy.spatial import KDTree
    >>> rng = np.random.default_rng()
    >>> points1 = rng.random((5, 2))
    >>> points2 = rng.random((5, 2))
    >>> kd_tree1 = KDTree(points1)
    >>> kd_tree2 = KDTree(points2)
    >>> kd_tree1.count_neighbors(kd_tree2, 0.2)
    1

    This number is same as the total pair number calculated by
    `query_ball_tree`:

    >>> indexes = kd_tree1.query_ball_tree(kd_tree2, r=0.2)
    >>> sum([len(i) for i in indexes])
    1

    """
    # The dual-tree traversal itself is implemented in the base class.
    return super().count_neighbors(other, r, p, weights, cumulative)
803
+
804
def sparse_distance_matrix(
        self, other, max_distance, p=2., output_type='dok_matrix'):
    """Compute a sparse distance matrix.

    Computes a distance matrix between two KDTrees, leaving as zero
    any distance greater than max_distance.

    Parameters
    ----------
    other : KDTree
        The tree to compute distances against.
    max_distance : positive float
        Distances beyond this value are left as zero (not stored).
    p : float, 1<=p<=infinity
        Which Minkowski p-norm to use.
        A finite large p may cause a ValueError if overflow can occur.
    output_type : string, optional
        Which container to use for output data. Options: 'dok_matrix',
        'coo_matrix', 'dict', or 'ndarray'. Default: 'dok_matrix'.

        .. versionadded:: 1.6.0

    Returns
    -------
    result : dok_matrix, coo_matrix, dict or ndarray
        Sparse matrix representing the results in "dictionary of keys"
        format. If a dict is returned the keys are (i,j) tuples of
        indices. If output_type is 'ndarray' a record array with fields
        'i', 'j', and 'v' is returned,

    Examples
    --------
    You can compute a sparse distance matrix between two kd-trees:

    >>> import numpy as np
    >>> from scipy.spatial import KDTree
    >>> rng = np.random.default_rng()
    >>> points1 = rng.random((5, 2))
    >>> points2 = rng.random((5, 2))
    >>> kd_tree1 = KDTree(points1)
    >>> kd_tree2 = KDTree(points2)
    >>> sdm = kd_tree1.sparse_distance_matrix(kd_tree2, 0.3)
    >>> sdm.toarray()
    array([[0.        , 0.        , 0.12295571, 0.        , 0.        ],
           [0.        , 0.        , 0.        , 0.        , 0.        ],
           [0.28942611, 0.        , 0.        , 0.2333084 , 0.        ],
           [0.        , 0.        , 0.        , 0.        , 0.        ],
           [0.24617575, 0.29571802, 0.26836782, 0.        , 0.        ]])

    You can check distances above the `max_distance` are zeros:

    >>> from scipy.spatial import distance_matrix
    >>> distance_matrix(points1, points2)
    array([[0.56906522, 0.39923701, 0.12295571, 0.8658745 , 0.79428925],
           [0.37327919, 0.7225693 , 0.87665969, 0.32580855, 0.75679479],
           [0.28942611, 0.30088013, 0.6395831 , 0.2333084 , 0.33630734],
           [0.31994999, 0.72658602, 0.71124834, 0.55396483, 0.90785663],
           [0.24617575, 0.29571802, 0.26836782, 0.57714465, 0.6473269 ]])

    """
    # Delegate straight to the compiled base-class implementation.
    return super().sparse_distance_matrix(
        other, max_distance, p, output_type)
867
+
868
+
869
def distance_matrix(x, y, p=2, threshold=1000000):
    """Compute the distance matrix.

    Returns the matrix of all pair-wise distances.

    Parameters
    ----------
    x : (M, K) array_like
        Matrix of M vectors in K dimensions.
    y : (N, K) array_like
        Matrix of N vectors in K dimensions.
    p : float, 1 <= p <= infinity
        Which Minkowski p-norm to use.
    threshold : positive int
        If ``M * N * K`` > `threshold`, algorithm uses a Python loop
        instead of large temporary arrays.

    Returns
    -------
    result : (M, N) ndarray
        Matrix containing the distance from every vector in `x` to
        every vector in `y`.

    Examples
    --------
    >>> from scipy.spatial import distance_matrix
    >>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])
    array([[ 1.        ,  1.41421356],
           [ 1.41421356,  1.        ]])

    """
    x = np.asarray(x)
    y = np.asarray(y)
    m, k = x.shape
    n, kk = y.shape

    if k != kk:
        raise ValueError(f"x contains {k}-dimensional vectors but y contains "
                         f"{kk}-dimensional vectors")

    if m * n * k <= threshold:
        # Small problem: evaluate in one fully-broadcast vectorized call.
        return minkowski_distance(x[:, np.newaxis, :], y[np.newaxis, :, :], p)

    # Large problem: fill row- or column-wise to bound temporary memory.
    # Looping over the shorter axis minimizes the number of Python-level
    # iterations.
    result = np.empty((m, n), dtype=float)  # FIXME: figure out the best dtype
    if m < n:
        for row in range(m):
            result[row, :] = minkowski_distance(x[row], y, p)
    else:
        for col in range(n):
            result[:, col] = minkowski_distance(x, y[col], p)
    return result
llmeval-env/lib/python3.10/site-packages/scipy/spatial/_plotutils.py ADDED
@@ -0,0 +1,270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy._lib.decorator import decorator as _decorator
3
+
4
+ __all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
5
+
6
+
7
@_decorator
def _held_figure(func, obj, ax=None, **kw):
    """Run a plotting function on ``ax``, creating a figure when needed.

    Also preserves the legacy matplotlib 1.x "hold" state around the
    call when the axes expose it.
    """
    import matplotlib.pyplot as plt

    if ax is None:
        # No axes supplied: draw on a fresh figure's current axes.
        ax = plt.figure().gca()
        return func(obj, ax=ax, **kw)

    # As of matplotlib 2.0, the "hold" mechanism is deprecated; modern
    # axes have no ``ishold`` and behave as if always held.
    # When matplotlib 1.x is no longer supported, this check can go.
    previously_held = getattr(ax, 'ishold', lambda: True)()
    if previously_held:
        return func(obj, ax=ax, **kw)
    try:
        ax.hold(True)
        return func(obj, ax=ax, **kw)
    finally:
        ax.hold(previously_held)
+ ax.hold(was_held)
26
+
27
+
28
+ def _adjust_bounds(ax, points):
29
+ margin = 0.1 * np.ptp(points, axis=0)
30
+ xy_min = points.min(axis=0) - margin
31
+ xy_max = points.max(axis=0) + margin
32
+ ax.set_xlim(xy_min[0], xy_max[0])
33
+ ax.set_ylim(xy_min[1], xy_max[1])
34
+
35
+
36
@_held_figure
def delaunay_plot_2d(tri, ax=None):
    """
    Plot the given Delaunay triangulation in 2-D

    Parameters
    ----------
    tri : scipy.spatial.Delaunay instance
        Triangulation to plot
    ax : matplotlib.axes.Axes instance, optional
        Axes to plot on

    Returns
    -------
    fig : matplotlib.figure.Figure instance
        Figure for the plot

    See Also
    --------
    Delaunay
    matplotlib.pyplot.triplot

    Notes
    -----
    Requires Matplotlib.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.spatial import Delaunay, delaunay_plot_2d

    The Delaunay triangulation of a set of random points:

    >>> rng = np.random.default_rng()
    >>> points = rng.random((30, 2))
    >>> tri = Delaunay(points)

    Plot it:

    >>> _ = delaunay_plot_2d(tri)
    >>> plt.show()

    """
    if tri.points.shape[1] != 2:
        raise ValueError("Delaunay triangulation is not 2-D")

    # Vertices as markers, then the triangle edges over them.
    xs, ys = tri.points.T
    ax.plot(xs, ys, 'o')
    ax.triplot(xs, ys, tri.simplices.copy())

    _adjust_bounds(ax, tri.points)
    return ax.figure
91
+
92
+
93
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
    """
    Plot the given convex hull diagram in 2-D

    Parameters
    ----------
    hull : scipy.spatial.ConvexHull instance
        Convex hull to plot
    ax : matplotlib.axes.Axes instance, optional
        Axes to plot on

    Returns
    -------
    fig : matplotlib.figure.Figure instance
        Figure for the plot

    See Also
    --------
    ConvexHull

    Notes
    -----
    Requires Matplotlib.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.spatial import ConvexHull, convex_hull_plot_2d

    The convex hull of a random set of points:

    >>> rng = np.random.default_rng()
    >>> points = rng.random((30, 2))
    >>> hull = ConvexHull(points)

    Plot it:

    >>> _ = convex_hull_plot_2d(hull)
    >>> plt.show()

    """
    from matplotlib.collections import LineCollection

    if hull.points.shape[1] != 2:
        raise ValueError("Convex hull is not 2-D")

    # All input points as markers; hull edges drawn as one collection.
    ax.plot(hull.points[:, 0], hull.points[:, 1], 'o')
    edges = [hull.points[simplex] for simplex in hull.simplices]
    ax.add_collection(
        LineCollection(edges, colors='k', linestyle='solid'))

    _adjust_bounds(ax, hull.points)
    return ax.figure
151
+
152
+
153
@_held_figure
def voronoi_plot_2d(vor, ax=None, **kw):
    """
    Plot the given Voronoi diagram in 2-D

    Parameters
    ----------
    vor : scipy.spatial.Voronoi instance
        Diagram to plot
    ax : matplotlib.axes.Axes instance, optional
        Axes to plot on
    show_points : bool, optional
        Add the Voronoi points to the plot.
    show_vertices : bool, optional
        Add the Voronoi vertices to the plot.
    line_colors : string, optional
        Specifies the line color for polygon boundaries
    line_width : float, optional
        Specifies the line width for polygon boundaries
    line_alpha : float, optional
        Specifies the line alpha for polygon boundaries
    point_size : float, optional
        Specifies the size of points

    Returns
    -------
    fig : matplotlib.figure.Figure instance
        Figure for the plot

    See Also
    --------
    Voronoi

    Notes
    -----
    Requires Matplotlib.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.spatial import Voronoi, voronoi_plot_2d

    Create a set of points for the example:

    >>> rng = np.random.default_rng()
    >>> points = rng.random((10,2))

    Generate the Voronoi diagram for the points:

    >>> vor = Voronoi(points)

    Use `voronoi_plot_2d` to plot the diagram:

    >>> fig = voronoi_plot_2d(vor)

    Use `voronoi_plot_2d` to plot the diagram again, with some settings
    customized:

    >>> fig = voronoi_plot_2d(vor, show_vertices=False, line_colors='orange',
    ...                       line_width=2, line_alpha=0.6, point_size=2)
    >>> plt.show()

    """
    from matplotlib.collections import LineCollection

    if vor.points.shape[1] != 2:
        raise ValueError("Voronoi diagram is not 2-D")

    # Optional scatter of the input sites and the Voronoi vertices.
    if kw.get('show_points', True):
        marker_size = kw.get('point_size', None)
        ax.plot(vor.points[:, 0], vor.points[:, 1], '.',
                markersize=marker_size)
    if kw.get('show_vertices', True):
        ax.plot(vor.vertices[:, 0], vor.vertices[:, 1], 'o')

    edge_colors = kw.get('line_colors', 'k')
    edge_width = kw.get('line_width', 1.0)
    edge_alpha = kw.get('line_alpha', 1.0)

    center = vor.points.mean(axis=0)
    extent = np.ptp(vor.points, axis=0)

    finite_segments = []
    infinite_segments = []
    for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
        simplex = np.asarray(simplex)
        if np.all(simplex >= 0):
            # Both ridge endpoints are real Voronoi vertices.
            finite_segments.append(vor.vertices[simplex])
        else:
            # Ridge extends to infinity: one endpoint is a vertex
            # (index >= 0), the other is synthesized far away.
            i = simplex[simplex >= 0][0]  # finite end Voronoi vertex

            t = vor.points[pointidx[1]] - vor.points[pointidx[0]]  # tangent
            t /= np.linalg.norm(t)
            n = np.array([-t[1], t[0]])  # normal

            # Point the ray away from the diagram's center (toward it
            # for a furthest-site diagram).
            midpoint = vor.points[pointidx].mean(axis=0)
            direction = np.sign(np.dot(midpoint - center, n)) * n
            if vor.furthest_site:
                direction = -direction
            # Scale the ray so it leaves any reasonably-shaped view.
            aspect_factor = abs(extent.max() / extent.min())
            far_point = (vor.vertices[i]
                         + direction * extent.max() * aspect_factor)

            infinite_segments.append([vor.vertices[i], far_point])

    # Finite ridges solid, infinite ridges dashed.
    ax.add_collection(LineCollection(finite_segments,
                                     colors=edge_colors,
                                     lw=edge_width,
                                     alpha=edge_alpha,
                                     linestyle='solid'))
    ax.add_collection(LineCollection(infinite_segments,
                                     colors=edge_colors,
                                     lw=edge_width,
                                     alpha=edge_alpha,
                                     linestyle='dashed'))

    _adjust_bounds(ax, vor.points)
    return ax.figure
llmeval-env/lib/python3.10/site-packages/scipy/spatial/_procrustes.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module provides functions to perform full Procrustes analysis.
3
+
4
+ This code was originally written by Justin Kucynski and ported over from
5
+ scikit-bio by Yoshiki Vazquez-Baeza.
6
+ """
7
+
8
+ import numpy as np
9
+ from scipy.linalg import orthogonal_procrustes
10
+
11
+
12
+ __all__ = ['procrustes']
13
+
14
+
15
def procrustes(data1, data2):
    r"""Procrustes analysis, a similarity test for two data sets.

    Each input matrix is a set of points or vectors (the rows of the
    matrix). The dimension of the space is the number of columns of
    each matrix. Given two identically sized matrices, procrustes
    standardizes both such that:

    - :math:`tr(AA^{T}) = 1`.

    - Both sets of points are centered around the origin.

    Procrustes ([1]_, [2]_) then applies the optimal transform to the
    second matrix (including scaling/dilation, rotations, and
    reflections) to minimize :math:`M^{2}=\sum(data1-data2)^{2}`, or
    the sum of the squares of the pointwise differences between the
    two input datasets.

    This function was not designed to handle datasets with different
    numbers of datapoints (rows). If two data sets have different
    dimensionality (different number of columns), simply add columns
    of zeros to the smaller of the two.

    Parameters
    ----------
    data1 : array_like
        Matrix, n rows represent points in k (columns) space. `data1`
        is the reference data: after it is standardised, the data from
        `data2` will be transformed to fit the pattern in `data1`
        (must have >1 unique points).
    data2 : array_like
        n rows of data in k space to be fit to `data1`. Must be the
        same shape ``(numrows, numcols)`` as data1 (must have >1
        unique points).

    Returns
    -------
    mtx1 : array_like
        A standardized version of `data1`.
    mtx2 : array_like
        The orientation of `data2` that best fits `data1`. Centered,
        but not necessarily :math:`tr(AA^{T}) = 1`.
    disparity : float
        :math:`M^{2}` as defined above.

    Raises
    ------
    ValueError
        If the input arrays are not two-dimensional.
        If the shape of the input arrays is different.
        If the input arrays have zero columns or zero rows.

    See Also
    --------
    scipy.linalg.orthogonal_procrustes
    scipy.spatial.distance.directed_hausdorff : Another similarity test
        for two data sets

    Notes
    -----
    - The disparity should not depend on the order of the input
      matrices, but the output matrices will, as only the first output
      matrix is guaranteed to be scaled such that
      :math:`tr(AA^{T}) = 1`.

    - Duplicate data points are generally ok; duplicating a data point
      will increase its effect on the procrustes fit.

    - The disparity scales as the number of points per input matrix.

    References
    ----------
    .. [1] Krzanowski, W. J. (2000). "Principles of Multivariate
           analysis".
    .. [2] Gower, J. C. (1975). "Generalized procrustes analysis".

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.spatial import procrustes

    The matrix ``b`` is a rotated, shifted, scaled and mirrored version
    of ``a`` here:

    >>> a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
    >>> b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
    >>> mtx1, mtx2, disparity = procrustes(a, b)
    >>> round(disparity)
    0.0

    """
    # Work on float64 copies so the inputs are never mutated.
    ref = np.array(data1, dtype=np.float64, copy=True)
    fit = np.array(data2, dtype=np.float64, copy=True)

    if ref.ndim != 2 or fit.ndim != 2:
        raise ValueError("Input matrices must be two-dimensional")
    if ref.shape != fit.shape:
        raise ValueError("Input matrices must be of same shape")
    if ref.size == 0:
        raise ValueError("Input matrices must be >0 rows and >0 cols")

    # Center both point sets on the origin.
    ref -= np.mean(ref, 0)
    fit -= np.mean(fit, 0)

    ref_norm = np.linalg.norm(ref)
    fit_norm = np.linalg.norm(fit)
    if ref_norm == 0 or fit_norm == 0:
        # All points coincide; no meaningful shape to compare.
        raise ValueError("Input matrices must contain >1 unique points")

    # Rescale each set so that trace(mtx @ mtx.T) == 1.
    ref /= ref_norm
    fit /= fit_norm

    # Optimal rotation/reflection plus dilation of `fit` onto `ref`.
    rotation, scale = orthogonal_procrustes(ref, fit)
    fit = np.dot(fit, rotation.T) * scale

    # Disparity is the residual sum of squared pointwise differences.
    disparity = np.sum(np.square(ref - fit))

    return ref, fit, disparity
132
+
llmeval-env/lib/python3.10/site-packages/scipy/spatial/_qhull.pyi ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''
2
+ Static type checking stub file for scipy/spatial/qhull.pyx
3
+ '''
4
+
5
+
6
+ import numpy as np
7
+ from numpy.typing import ArrayLike, NDArray
8
+ from typing_extensions import final
9
+
10
# Raised when the underlying Qhull library reports a failure.
class QhullError(RuntimeError):
    ...
12
+
13
# Low-level wrapper around a single Qhull computation; `@final` because the
# Cython extension type cannot be subclassed.
@final
class _Qhull:
    # Read-only cython attribute that behaves, more or less, like a property
    @property
    def ndim(self) -> int: ...
    mode_option: bytes
    options: bytes
    furthest_site: bool

    def __init__(
        self,
        mode_option: bytes,
        points: NDArray[np.float64],
        options: None | bytes = ...,
        required_options: None | bytes = ...,
        furthest_site: bool = ...,
        incremental: bool = ...,
        interior_point: None | NDArray[np.float64] = ...,
    ) -> None: ...
    def check_active(self) -> None: ...
    def close(self) -> None: ...
    def get_points(self) -> NDArray[np.float64]: ...
    def add_points(
        self,
        points: ArrayLike,
        interior_point: ArrayLike = ...
    ) -> None: ...
    def get_paraboloid_shift_scale(self) -> tuple[float, float]: ...
    def volume_area(self) -> tuple[float, float]: ...
    def triangulate(self) -> None: ...
    # Returns (simplices, neighbors, equations, coplanar, good) arrays;
    # see qhull.pyx for the exact semantics of each component.
    def get_simplex_facet_array(self) -> tuple[
        NDArray[np.intc],
        NDArray[np.intc],
        NDArray[np.float64],
        NDArray[np.intc],
        NDArray[np.intc],
    ]: ...
    def get_hull_points(self) -> NDArray[np.float64]: ...
    def get_hull_facets(self) -> tuple[
        list[list[int]],
        NDArray[np.float64],
    ]: ...
    # Returns (vertices, ridge_points, ridge_vertices, regions, point_region);
    # mirrors the attributes exposed by the high-level Voronoi class below.
    def get_voronoi_diagram(self) -> tuple[
        NDArray[np.float64],
        NDArray[np.intc],
        list[list[int]],
        list[list[int]],
        NDArray[np.intp],
    ]: ...
    def get_extremes_2d(self) -> NDArray[np.intc]: ...
63
+
64
# Internal Cython helper; presumably computes the barycentric-coordinate
# transform for each simplex (see qhull.pyx for the implementation).
def _get_barycentric_transforms(
    points: NDArray[np.float64],
    simplices: NDArray[np.intc],
    eps: float
) -> NDArray[np.float64]: ...
69
+
70
# Shared base class of the high-level wrappers (Delaunay, ConvexHull, Voronoi,
# HalfspaceIntersection below); holds point-cloud metadata and the
# incremental-update plumbing.
class _QhullUser:
    ndim: int
    npoints: int
    min_bound: NDArray[np.float64]
    max_bound: NDArray[np.float64]

    def __init__(self, qhull: _Qhull, incremental: bool = ...) -> None: ...
    def close(self) -> None: ...
    def _update(self, qhull: _Qhull) -> None: ...
    def _add_points(
        self,
        points: ArrayLike,
        restart: bool = ...,
        interior_point: ArrayLike = ...
    ) -> None: ...
85
+
86
# Stub for scipy.spatial.Delaunay: Delaunay tessellation in N dimensions.
class Delaunay(_QhullUser):
    furthest_site: bool
    paraboloid_scale: float
    paraboloid_shift: float
    simplices: NDArray[np.intc]
    neighbors: NDArray[np.intc]
    equations: NDArray[np.float64]
    coplanar: NDArray[np.intc]
    good: NDArray[np.intc]
    nsimplex: int
    vertices: NDArray[np.intc]

    def __init__(
        self,
        points: ArrayLike,
        furthest_site: bool = ...,
        incremental: bool = ...,
        qhull_options: None | str = ...
    ) -> None: ...
    def _update(self, qhull: _Qhull) -> None: ...
    def add_points(
        self,
        points: ArrayLike,
        restart: bool = ...
    ) -> None: ...
    @property
    def points(self) -> NDArray[np.float64]: ...
    @property
    def transform(self) -> NDArray[np.float64]: ...
    @property
    def vertex_to_simplex(self) -> NDArray[np.intc]: ...
    # (indptr, indices) pair in CSR-like layout.
    @property
    def vertex_neighbor_vertices(self) -> tuple[
        NDArray[np.intc],
        NDArray[np.intc],
    ]: ...
    @property
    def convex_hull(self) -> NDArray[np.intc]: ...
    def find_simplex(
        self,
        xi: ArrayLike,
        bruteforce: bool = ...,
        tol: float = ...
    ) -> NDArray[np.intc]: ...
    def plane_distance(self, xi: ArrayLike) -> NDArray[np.float64]: ...
    def lift_points(self, x: ArrayLike) -> NDArray[np.float64]: ...
132
+
133
# NOTE(review): presumably locates the simplices of `tri` containing each
# point in `xi` (cf. Delaunay.find_simplex) — confirm against qhull.pyx.
def tsearch(tri: Delaunay, xi: ArrayLike) -> NDArray[np.intc]: ...
134
# Internal helper; presumably copies the docstring from `src` onto `dst`.
def _copy_docstr(dst: object, src: object) -> None: ...
135
+
136
# Stub for scipy.spatial.ConvexHull: convex hull in N dimensions.
class ConvexHull(_QhullUser):
    simplices: NDArray[np.intc]
    neighbors: NDArray[np.intc]
    equations: NDArray[np.float64]
    coplanar: NDArray[np.intc]
    good: None | NDArray[np.bool_]
    volume: float
    area: float
    nsimplex: int

    def __init__(
        self,
        points: ArrayLike,
        incremental: bool = ...,
        qhull_options: None | str = ...
    ) -> None: ...
    def _update(self, qhull: _Qhull) -> None: ...
    def add_points(self, points: ArrayLike,
                   restart: bool = ...) -> None: ...
    @property
    def points(self) -> NDArray[np.float64]: ...
    @property
    def vertices(self) -> NDArray[np.intc]: ...
159
+
160
# Stub for scipy.spatial.Voronoi: Voronoi diagram in N dimensions.
class Voronoi(_QhullUser):
    vertices: NDArray[np.float64]
    ridge_points: NDArray[np.intc]
    ridge_vertices: list[list[int]]
    regions: list[list[int]]
    point_region: NDArray[np.intp]
    furthest_site: bool

    def __init__(
        self,
        points: ArrayLike,
        furthest_site: bool = ...,
        incremental: bool = ...,
        qhull_options: None | str = ...
    ) -> None: ...
    def _update(self, qhull: _Qhull) -> None: ...
    def add_points(
        self,
        points: ArrayLike,
        restart: bool = ...
    ) -> None: ...
    @property
    def points(self) -> NDArray[np.float64]: ...
    # Maps an input-point index pair to the Voronoi vertices of their ridge.
    @property
    def ridge_dict(self) -> dict[tuple[int, int], list[int]]: ...
185
+
186
# Stub for scipy.spatial.HalfspaceIntersection: intersection of halfspaces
# around a given interior point.
class HalfspaceIntersection(_QhullUser):
    interior_point: NDArray[np.float64]
    dual_facets: list[list[int]]
    dual_equations: NDArray[np.float64]
    dual_points: NDArray[np.float64]
    dual_volume: float
    dual_area: float
    intersections: NDArray[np.float64]
    ndim: int
    nineq: int

    def __init__(
        self,
        halfspaces: ArrayLike,
        interior_point: ArrayLike,
        incremental: bool = ...,
        qhull_options: None | str = ...
    ) -> None: ...
    def _update(self, qhull: _Qhull) -> None: ...
    def add_halfspaces(
        self,
        halfspaces: ArrayLike,
        restart: bool = ...
    ) -> None: ...
    @property
    def halfspaces(self) -> NDArray[np.float64]: ...
    @property
    def dual_vertices(self) -> NDArray[np.integer]: ...
llmeval-env/lib/python3.10/site-packages/scipy/spatial/_spherical_voronoi.py ADDED
@@ -0,0 +1,341 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Spherical Voronoi Code
3
+
4
+ .. versionadded:: 0.18.0
5
+
6
+ """
7
+ #
8
+ # Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson,
9
+ # Nikolai Nowaczyk, Joe Pitt-Francis, 2015.
10
+ #
11
+ # Distributed under the same BSD license as SciPy.
12
+ #
13
+
14
+ import numpy as np
15
+ import scipy
16
+ from . import _voronoi
17
+ from scipy.spatial import cKDTree
18
+
19
+ __all__ = ['SphericalVoronoi']
20
+
21
+
22
def calculate_solid_angles(R):
    """Compute the solid angles subtended by plane triangles.

    Implements the Van Oosterom--Strackee formula [VanOosterom]_ with the
    scalar triple product replaced by a determinant, which can be computed
    with better numerical stability. The vertex vectors are assumed to
    have unit norm.

    Parameters
    ----------
    R : ndarray, shape (n, 3, 3)
        Stack of triangles; ``R[i, j]`` is the j-th unit-norm vertex of
        triangle ``i``.

    Returns
    -------
    ndarray, shape (n,)
        Absolute solid angle of each triangle.
    """
    # tan(omega / 2) = det([r1 r2 r3]) / (1 + r1.r2 + r2.r3 + r3.r1)
    pairwise_dots = sum(np.einsum('ij,ij->i', R[:, a], R[:, b])
                        for a, b in ((0, 1), (1, 2), (2, 0)))
    return np.abs(2.0 * np.arctan2(np.linalg.det(R), 1.0 + pairwise_dots))
34
+
35
+
36
class SphericalVoronoi:
    """ Voronoi diagrams on the surface of a sphere.

    .. versionadded:: 0.18.0

    Parameters
    ----------
    points : ndarray of floats, shape (npoints, ndim)
        Coordinates of points from which to construct a spherical
        Voronoi diagram.
    radius : float, optional
        Radius of the sphere (Default: 1)
    center : ndarray of floats, shape (ndim,)
        Center of sphere (Default: origin)
    threshold : float
        Threshold for detecting duplicate points and
        mismatches between points and sphere parameters.
        (Default: 1e-06)

    Attributes
    ----------
    points : double array of shape (npoints, ndim)
        the points in `ndim` dimensions to generate the Voronoi diagram from
    radius : double
        radius of the sphere
    center : double array of shape (ndim,)
        center of the sphere
    vertices : double array of shape (nvertices, ndim)
        Voronoi vertices corresponding to points
    regions : list of list of integers of shape (npoints, _ )
        the n-th entry is a list consisting of the indices
        of the vertices belonging to the n-th point in points

    Methods
    -------
    calculate_areas
        Calculates the areas of the Voronoi regions. For 2D point sets, the
        regions are circular arcs. The sum of the areas is `2 * pi * radius`.
        For 3D point sets, the regions are spherical polygons. The sum of the
        areas is `4 * pi * radius**2`.

    Raises
    ------
    ValueError
        If there are duplicates in `points`.
        If the provided `radius` is not consistent with `points`.

    Notes
    -----
    The spherical Voronoi diagram algorithm proceeds as follows. The Convex
    Hull of the input points (generators) is calculated, and is equivalent to
    their Delaunay triangulation on the surface of the sphere [Caroli]_.
    The Convex Hull neighbour information is then used to
    order the Voronoi region vertices around each generator. The latter
    approach is substantially less sensitive to floating point issues than
    angle-based methods of Voronoi region vertex sorting.

    Empirical assessment of spherical Voronoi algorithm performance suggests
    quadratic time complexity (loglinear is optimal, but algorithms are more
    challenging to implement).

    References
    ----------
    .. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of
                points on or close to a sphere. Research Report RR-7004, 2009.

    .. [VanOosterom] Van Oosterom and Strackee. The solid angle of a plane
                     triangle. IEEE Transactions on Biomedical Engineering,
                     2, 1983, pp 125--126.

    See Also
    --------
    Voronoi : Conventional Voronoi diagrams in N dimensions.

    Examples
    --------
    Do some imports and take some points on a cube:

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.spatial import SphericalVoronoi, geometric_slerp
    >>> from mpl_toolkits.mplot3d import proj3d
    >>> # set input data
    >>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
    ...                    [0, 1, 0], [0, -1, 0], [-1, 0, 0], ])

    Calculate the spherical Voronoi diagram:

    >>> radius = 1
    >>> center = np.array([0, 0, 0])
    >>> sv = SphericalVoronoi(points, radius, center)

    Generate plot:

    >>> # sort vertices (optional, helpful for plotting)
    >>> sv.sort_vertices_of_regions()
    >>> t_vals = np.linspace(0, 1, 2000)
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111, projection='3d')
    >>> # plot the unit sphere for reference (optional)
    >>> u = np.linspace(0, 2 * np.pi, 100)
    >>> v = np.linspace(0, np.pi, 100)
    >>> x = np.outer(np.cos(u), np.sin(v))
    >>> y = np.outer(np.sin(u), np.sin(v))
    >>> z = np.outer(np.ones(np.size(u)), np.cos(v))
    >>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
    >>> # plot generator points
    >>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
    >>> # plot Voronoi vertices
    >>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
    ...            c='g')
    >>> # indicate Voronoi regions (as Euclidean polygons)
    >>> for region in sv.regions:
    ...    n = len(region)
    ...    for i in range(n):
    ...        start = sv.vertices[region][i]
    ...        end = sv.vertices[region][(i + 1) % n]
    ...        result = geometric_slerp(start, end, t_vals)
    ...        ax.plot(result[..., 0],
    ...                result[..., 1],
    ...                result[..., 2],
    ...                c='k')
    >>> ax.azim = 10
    >>> ax.elev = 40
    >>> _ = ax.set_xticks([])
    >>> _ = ax.set_yticks([])
    >>> _ = ax.set_zticks([])
    >>> fig.set_size_inches(4, 4)
    >>> plt.show()

    """
    def __init__(self, points, radius=1, center=None, threshold=1e-06):

        if radius is None:
            raise ValueError('`radius` is `None`. '
                             'Please provide a floating point number '
                             '(i.e. `radius=1`).')

        # Work internally in float64 regardless of the input dtype.
        self.radius = float(radius)
        self.points = np.array(points).astype(np.float64)
        self._dim = self.points.shape[1]
        if center is None:
            self.center = np.zeros(self._dim)
        else:
            self.center = np.array(center, dtype=float)

        # test degenerate input
        # Full rank is required so the generators span the sphere's ambient
        # space (e.g. rejects collinear 2D or coplanar 3D inputs).
        self._rank = np.linalg.matrix_rank(self.points - self.points[0],
                                           tol=threshold * self.radius)
        if self._rank < self._dim:
            raise ValueError(f"Rank of input points must be at least {self._dim}")

        # Any generator pair closer than threshold * radius counts as a
        # duplicate.
        if cKDTree(self.points).query_pairs(threshold * self.radius):
            raise ValueError("Duplicate generators present.")

        # All generators must lie on the sphere to within the same tolerance.
        radii = np.linalg.norm(self.points - self.center, axis=1)
        max_discrepancy = np.abs(radii - self.radius).max()
        if max_discrepancy >= threshold * self.radius:
            raise ValueError("Radius inconsistent with generators.")

        self._calc_vertices_regions()

    def _calc_vertices_regions(self):
        """
        Calculates the Voronoi vertices and regions of the generators stored
        in self.points. The vertices will be stored in self.vertices and the
        regions in self.regions.

        This algorithm was discussed at PyData London 2015 by
        Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk
        """
        # get Convex Hull
        conv = scipy.spatial.ConvexHull(self.points)
        # get circumcenters of Convex Hull triangles from facet equations
        # for 3D input circumcenters will have shape: (2N-4, 3)
        self.vertices = self.radius * conv.equations[:, :-1] + self.center
        self._simplices = conv.simplices
        # calculate regions from triangulation
        # for 3D input simplex_indices will have shape: (2N-4,)
        simplex_indices = np.arange(len(self._simplices))
        # for 3D input tri_indices will have shape: (6N-12,)
        tri_indices = np.column_stack([simplex_indices] * self._dim).ravel()
        # for 3D input point_indices will have shape: (6N-12,)
        point_indices = self._simplices.ravel()
        # for 3D input indices will have shape: (6N-12,)
        # 'mergesort' is stable, so simplices stay in hull order within each
        # generator's group.
        indices = np.argsort(point_indices, kind='mergesort')
        # for 3D input flattened_groups will have shape: (6N-12,)
        flattened_groups = tri_indices[indices].astype(np.intp)
        # intervals will have shape: (N+1,)
        intervals = np.cumsum(np.bincount(point_indices + 1))
        # split flattened groups to get nested list of unsorted regions
        groups = [list(flattened_groups[intervals[i]:intervals[i + 1]])
                  for i in range(len(intervals) - 1)]
        self.regions = groups

    def sort_vertices_of_regions(self):
        """Sort indices of the vertices to be (counter-)clockwise ordered.

        Raises
        ------
        TypeError
            If the points are not three-dimensional.

        Notes
        -----
        For each region in regions, it sorts the indices of the Voronoi
        vertices such that the resulting points are in a clockwise or
        counterclockwise order around the generator point.

        This is done as follows: Recall that the n-th region in regions
        surrounds the n-th generator in points and that the k-th
        Voronoi vertex in vertices is the circumcenter of the k-th triangle
        in self._simplices. For each region n, we choose the first triangle
        (=Voronoi vertex) in self._simplices and a vertex of that triangle
        not equal to the center n. These determine a unique neighbor of that
        triangle, which is then chosen as the second triangle. The second
        triangle will have a unique vertex not equal to the current vertex or
        the center. This determines a unique neighbor of the second triangle,
        which is then chosen as the third triangle and so forth. We proceed
        through all the triangles (=Voronoi vertices) belonging to the
        generator in points and obtain a sorted version of the vertices
        of its surrounding region.
        """
        if self._dim != 3:
            raise TypeError("Only supported for three-dimensional point sets")
        # In-place sort implemented in the Cython _voronoi module.
        _voronoi.sort_vertices_of_regions(self._simplices, self.regions)

    def _calculate_areas_3d(self):
        # Sorting guarantees adjacent region entries share a triangle edge.
        self.sort_vertices_of_regions()
        sizes = [len(region) for region in self.regions]
        csizes = np.cumsum(sizes)
        num_regions = csizes[-1]

        # We create a set of triangles consisting of one point and two Voronoi
        # vertices. The vertices of each triangle are adjacent in the sorted
        # regions list.
        point_indices = [i for i, size in enumerate(sizes)
                         for j in range(size)]

        nbrs1 = np.array([r for region in self.regions for r in region])

        # The calculation of nbrs2 is a vectorized version of:
        # np.array([r for region in self.regions for r in np.roll(region, 1)])
        nbrs2 = np.roll(nbrs1, 1)
        indices = np.roll(csizes, 1)
        indices[0] = 0
        nbrs2[indices] = nbrs1[csizes - 1]

        # Normalize points and vertices.
        pnormalized = (self.points - self.center) / self.radius
        vnormalized = (self.vertices - self.center) / self.radius

        # Create the complete set of triangles and calculate their solid angles
        triangles = np.hstack([pnormalized[point_indices],
                               vnormalized[nbrs1],
                               vnormalized[nbrs2]
                               ]).reshape((num_regions, 3, 3))
        triangle_solid_angles = calculate_solid_angles(triangles)

        # Sum the solid angles of the triangles in each region
        # (segmented sum via cumulative-sum differences).
        solid_angles = np.cumsum(triangle_solid_angles)[csizes - 1]
        solid_angles[1:] -= solid_angles[:-1]

        # Get polygon areas using A = omega * r**2
        return solid_angles * self.radius**2

    def _calculate_areas_2d(self):
        # Find start and end points of arcs
        arcs = self.points[self._simplices] - self.center

        # Calculate the angle subtended by arcs
        # (chord length d relates to the angle via the law of cosines).
        d = np.sum((arcs[:, 1] - arcs[:, 0]) ** 2, axis=1)
        theta = np.arccos(1 - (d / (2 * (self.radius ** 2))))

        # Get areas using A = r * theta
        areas = self.radius * theta

        # Correct arcs which go the wrong way (single-hemisphere inputs)
        signs = np.sign(np.einsum('ij,ij->i', arcs[:, 0],
                                  self.vertices - self.center))
        indices = np.where(signs < 0)
        areas[indices] = 2 * np.pi * self.radius - areas[indices]
        return areas

    def calculate_areas(self):
        """Calculates the areas of the Voronoi regions.

        For 2D point sets, the regions are circular arcs. The sum of the areas
        is `2 * pi * radius`.

        For 3D point sets, the regions are spherical polygons. The sum of the
        areas is `4 * pi * radius**2`.

        .. versionadded:: 1.5.0

        Returns
        -------
        areas : double array of shape (npoints,)
            The areas of the Voronoi regions.
        """
        if self._dim == 2:
            return self._calculate_areas_2d()
        elif self._dim == 3:
            return self._calculate_areas_3d()
        else:
            raise TypeError("Only supported for 2D and 3D point sets")
llmeval-env/lib/python3.10/site-packages/scipy/spatial/_voronoi.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (241 kB). View file