Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- ckpts/universal/global_step40/zero/7.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/__init__.py +208 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_validation.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_flow.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_laplacian.py +562 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_matching.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_min_spanning_tree.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_reordering.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_shortest_path.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_tools.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_traversal.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_validation.py +61 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__init__.py +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_connected_components.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_conversions.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_flow.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_graph_laplacian.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_matching.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_pydata_sparse.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_reordering.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_shortest_path.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_spanning_tree.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_traversal.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_connected_components.py +119 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_conversions.py +61 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_flow.py +201 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py +369 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_matching.py +294 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_pydata_sparse.py +149 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_reordering.py +70 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_shortest_path.py +395 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_spanning_tree.py +66 -0
- venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_traversal.py +81 -0
- venv/lib/python3.10/site-packages/scipy/sparse/linalg/__init__.py +146 -0
- venv/lib/python3.10/site-packages/scipy/sparse/linalg/_expm_multiply.py +810 -0
- venv/lib/python3.10/site-packages/scipy/sparse/linalg/_interface.py +896 -0
- venv/lib/python3.10/site-packages/scipy/sparse/linalg/_matfuncs.py +940 -0
- venv/lib/python3.10/site-packages/scipy/sparse/linalg/_norm.py +193 -0
- venv/lib/python3.10/site-packages/scipy/sparse/linalg/_onenormest.py +467 -0
- venv/lib/python3.10/site-packages/scipy/sparse/linalg/_special_sparse_arrays.py +948 -0
- venv/lib/python3.10/site-packages/scipy/sparse/linalg/dsolve.py +24 -0
- venv/lib/python3.10/site-packages/scipy/sparse/linalg/eigen.py +23 -0
- venv/lib/python3.10/site-packages/scipy/sparse/linalg/interface.py +22 -0
- venv/lib/python3.10/site-packages/scipy/sparse/linalg/isolve.py +22 -0
- venv/lib/python3.10/site-packages/scipy/sparse/linalg/matfuncs.py +22 -0
- venv/lib/python3.10/site-packages/scipy/sparse/tests/__init__.py +0 -0
- venv/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/__init__.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/7.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:9ceaa3a7b3cd3ff5c09a9742925a0f2c74915085d14afffeec3b849b0987b0c9
|
3 |
+
size 33555612
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/__init__.py
ADDED
@@ -0,0 +1,208 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
r"""
|
2 |
+
Compressed sparse graph routines (:mod:`scipy.sparse.csgraph`)
|
3 |
+
==============================================================
|
4 |
+
|
5 |
+
.. currentmodule:: scipy.sparse.csgraph
|
6 |
+
|
7 |
+
Fast graph algorithms based on sparse matrix representations.
|
8 |
+
|
9 |
+
Contents
|
10 |
+
--------
|
11 |
+
|
12 |
+
.. autosummary::
|
13 |
+
:toctree: generated/
|
14 |
+
|
15 |
+
connected_components -- determine connected components of a graph
|
16 |
+
laplacian -- compute the laplacian of a graph
|
17 |
+
shortest_path -- compute the shortest path between points on a positive graph
|
18 |
+
dijkstra -- use Dijkstra's algorithm for shortest path
|
19 |
+
floyd_warshall -- use the Floyd-Warshall algorithm for shortest path
|
20 |
+
bellman_ford -- use the Bellman-Ford algorithm for shortest path
|
21 |
+
johnson -- use Johnson's algorithm for shortest path
|
22 |
+
breadth_first_order -- compute a breadth-first order of nodes
|
23 |
+
depth_first_order -- compute a depth-first order of nodes
|
24 |
+
breadth_first_tree -- construct the breadth-first tree from a given node
|
25 |
+
depth_first_tree -- construct a depth-first tree from a given node
|
26 |
+
minimum_spanning_tree -- construct the minimum spanning tree of a graph
|
27 |
+
reverse_cuthill_mckee -- compute permutation for reverse Cuthill-McKee ordering
|
28 |
+
maximum_flow -- solve the maximum flow problem for a graph
|
29 |
+
maximum_bipartite_matching -- compute a maximum matching of a bipartite graph
|
30 |
+
min_weight_full_bipartite_matching - compute a minimum weight full matching of a bipartite graph
|
31 |
+
structural_rank -- compute the structural rank of a graph
|
32 |
+
NegativeCycleError
|
33 |
+
|
34 |
+
.. autosummary::
|
35 |
+
:toctree: generated/
|
36 |
+
|
37 |
+
construct_dist_matrix
|
38 |
+
csgraph_from_dense
|
39 |
+
csgraph_from_masked
|
40 |
+
csgraph_masked_from_dense
|
41 |
+
csgraph_to_dense
|
42 |
+
csgraph_to_masked
|
43 |
+
reconstruct_path
|
44 |
+
|
45 |
+
Graph Representations
|
46 |
+
---------------------
|
47 |
+
This module uses graphs which are stored in a matrix format. A
|
48 |
+
graph with N nodes can be represented by an (N x N) adjacency matrix G.
|
49 |
+
If there is a connection from node i to node j, then G[i, j] = w, where
|
50 |
+
w is the weight of the connection. For nodes i and j which are
|
51 |
+
not connected, the value depends on the representation:
|
52 |
+
|
53 |
+
- for dense array representations, non-edges are represented by
|
54 |
+
G[i, j] = 0, infinity, or NaN.
|
55 |
+
|
56 |
+
- for dense masked representations (of type np.ma.MaskedArray), non-edges
|
57 |
+
are represented by masked values. This can be useful when graphs with
|
58 |
+
zero-weight edges are desired.
|
59 |
+
|
60 |
+
- for sparse array representations, non-edges are represented by
|
61 |
+
non-entries in the matrix. This sort of sparse representation also
|
62 |
+
allows for edges with zero weights.
|
63 |
+
|
64 |
+
As a concrete example, imagine that you would like to represent the following
|
65 |
+
undirected graph::
|
66 |
+
|
67 |
+
G
|
68 |
+
|
69 |
+
(0)
|
70 |
+
/ \
|
71 |
+
1 2
|
72 |
+
/ \
|
73 |
+
(2) (1)
|
74 |
+
|
75 |
+
This graph has three nodes, where node 0 and 1 are connected by an edge of
|
76 |
+
weight 2, and nodes 0 and 2 are connected by an edge of weight 1.
|
77 |
+
We can construct the dense, masked, and sparse representations as follows,
|
78 |
+
keeping in mind that an undirected graph is represented by a symmetric matrix::
|
79 |
+
|
80 |
+
>>> import numpy as np
|
81 |
+
>>> G_dense = np.array([[0, 2, 1],
|
82 |
+
... [2, 0, 0],
|
83 |
+
... [1, 0, 0]])
|
84 |
+
>>> G_masked = np.ma.masked_values(G_dense, 0)
|
85 |
+
>>> from scipy.sparse import csr_matrix
|
86 |
+
>>> G_sparse = csr_matrix(G_dense)
|
87 |
+
|
88 |
+
This becomes more difficult when zero edges are significant. For example,
|
89 |
+
consider the situation when we slightly modify the above graph::
|
90 |
+
|
91 |
+
G2
|
92 |
+
|
93 |
+
(0)
|
94 |
+
/ \
|
95 |
+
0 2
|
96 |
+
/ \
|
97 |
+
(2) (1)
|
98 |
+
|
99 |
+
This is identical to the previous graph, except nodes 0 and 2 are connected
|
100 |
+
by an edge of zero weight. In this case, the dense representation above
|
101 |
+
leads to ambiguities: how can non-edges be represented if zero is a meaningful
|
102 |
+
value? In this case, either a masked or sparse representation must be used
|
103 |
+
to eliminate the ambiguity::
|
104 |
+
|
105 |
+
>>> import numpy as np
|
106 |
+
>>> G2_data = np.array([[np.inf, 2, 0 ],
|
107 |
+
... [2, np.inf, np.inf],
|
108 |
+
... [0, np.inf, np.inf]])
|
109 |
+
>>> G2_masked = np.ma.masked_invalid(G2_data)
|
110 |
+
>>> from scipy.sparse.csgraph import csgraph_from_dense
|
111 |
+
>>> # G2_sparse = csr_matrix(G2_data) would give the wrong result
|
112 |
+
>>> G2_sparse = csgraph_from_dense(G2_data, null_value=np.inf)
|
113 |
+
>>> G2_sparse.data
|
114 |
+
array([ 2., 0., 2., 0.])
|
115 |
+
|
116 |
+
Here we have used a utility routine from the csgraph submodule in order to
|
117 |
+
convert the dense representation to a sparse representation which can be
|
118 |
+
understood by the algorithms in submodule. By viewing the data array, we
|
119 |
+
can see that the zero values are explicitly encoded in the graph.
|
120 |
+
|
121 |
+
Directed vs. undirected
|
122 |
+
^^^^^^^^^^^^^^^^^^^^^^^
|
123 |
+
Matrices may represent either directed or undirected graphs. This is
|
124 |
+
specified throughout the csgraph module by a boolean keyword. Graphs are
|
125 |
+
assumed to be directed by default. In a directed graph, traversal from node
|
126 |
+
i to node j can be accomplished over the edge G[i, j], but not the edge
|
127 |
+
G[j, i]. Consider the following dense graph::
|
128 |
+
|
129 |
+
>>> import numpy as np
|
130 |
+
>>> G_dense = np.array([[0, 1, 0],
|
131 |
+
... [2, 0, 3],
|
132 |
+
... [0, 4, 0]])
|
133 |
+
|
134 |
+
When ``directed=True`` we get the graph::
|
135 |
+
|
136 |
+
---1--> ---3-->
|
137 |
+
(0) (1) (2)
|
138 |
+
<--2--- <--4---
|
139 |
+
|
140 |
+
In a non-directed graph, traversal from node i to node j can be
|
141 |
+
accomplished over either G[i, j] or G[j, i]. If both edges are not null,
|
142 |
+
and the two have unequal weights, then the smaller of the two is used.
|
143 |
+
|
144 |
+
So for the same graph, when ``directed=False`` we get the graph::
|
145 |
+
|
146 |
+
(0)--1--(1)--3--(2)
|
147 |
+
|
148 |
+
Note that a symmetric matrix will represent an undirected graph, regardless
|
149 |
+
of whether the 'directed' keyword is set to True or False. In this case,
|
150 |
+
using ``directed=True`` generally leads to more efficient computation.
|
151 |
+
|
152 |
+
The routines in this module accept as input either scipy.sparse representations
|
153 |
+
(csr, csc, or lil format), masked representations, or dense representations
|
154 |
+
with non-edges indicated by zeros, infinities, and NaN entries.
|
155 |
+
""" # noqa: E501
|
156 |
+
|
157 |
+
__docformat__ = "restructuredtext en"
|
158 |
+
|
159 |
+
__all__ = ['connected_components',
|
160 |
+
'laplacian',
|
161 |
+
'shortest_path',
|
162 |
+
'floyd_warshall',
|
163 |
+
'dijkstra',
|
164 |
+
'bellman_ford',
|
165 |
+
'johnson',
|
166 |
+
'breadth_first_order',
|
167 |
+
'depth_first_order',
|
168 |
+
'breadth_first_tree',
|
169 |
+
'depth_first_tree',
|
170 |
+
'minimum_spanning_tree',
|
171 |
+
'reverse_cuthill_mckee',
|
172 |
+
'maximum_flow',
|
173 |
+
'maximum_bipartite_matching',
|
174 |
+
'min_weight_full_bipartite_matching',
|
175 |
+
'structural_rank',
|
176 |
+
'construct_dist_matrix',
|
177 |
+
'reconstruct_path',
|
178 |
+
'csgraph_masked_from_dense',
|
179 |
+
'csgraph_from_dense',
|
180 |
+
'csgraph_from_masked',
|
181 |
+
'csgraph_to_dense',
|
182 |
+
'csgraph_to_masked',
|
183 |
+
'NegativeCycleError']
|
184 |
+
|
185 |
+
from ._laplacian import laplacian
|
186 |
+
from ._shortest_path import (
|
187 |
+
shortest_path, floyd_warshall, dijkstra, bellman_ford, johnson,
|
188 |
+
NegativeCycleError
|
189 |
+
)
|
190 |
+
from ._traversal import (
|
191 |
+
breadth_first_order, depth_first_order, breadth_first_tree,
|
192 |
+
depth_first_tree, connected_components
|
193 |
+
)
|
194 |
+
from ._min_spanning_tree import minimum_spanning_tree
|
195 |
+
from ._flow import maximum_flow
|
196 |
+
from ._matching import (
|
197 |
+
maximum_bipartite_matching, min_weight_full_bipartite_matching
|
198 |
+
)
|
199 |
+
from ._reordering import reverse_cuthill_mckee, structural_rank
|
200 |
+
from ._tools import (
|
201 |
+
construct_dist_matrix, reconstruct_path, csgraph_from_dense,
|
202 |
+
csgraph_to_dense, csgraph_masked_from_dense, csgraph_from_masked,
|
203 |
+
csgraph_to_masked
|
204 |
+
)
|
205 |
+
|
206 |
+
from scipy._lib._testutils import PytestTester
|
207 |
+
test = PytestTester(__name__)
|
208 |
+
del PytestTester
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (7.46 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc
ADDED
Binary file (16.6 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_validation.cpython-310.pyc
ADDED
Binary file (1.57 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_flow.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (345 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_laplacian.py
ADDED
@@ -0,0 +1,562 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Laplacian of a compressed-sparse graph
|
3 |
+
"""
|
4 |
+
|
5 |
+
import numpy as np
|
6 |
+
from scipy.sparse import issparse
|
7 |
+
from scipy.sparse.linalg import LinearOperator
|
8 |
+
from scipy.sparse._sputils import convert_pydata_sparse_to_scipy, is_pydata_spmatrix
|
9 |
+
|
10 |
+
|
11 |
+
###############################################################################
|
12 |
+
# Graph laplacian
|
13 |
+
def laplacian(
|
14 |
+
csgraph,
|
15 |
+
normed=False,
|
16 |
+
return_diag=False,
|
17 |
+
use_out_degree=False,
|
18 |
+
*,
|
19 |
+
copy=True,
|
20 |
+
form="array",
|
21 |
+
dtype=None,
|
22 |
+
symmetrized=False,
|
23 |
+
):
|
24 |
+
"""
|
25 |
+
Return the Laplacian of a directed graph.
|
26 |
+
|
27 |
+
Parameters
|
28 |
+
----------
|
29 |
+
csgraph : array_like or sparse matrix, 2 dimensions
|
30 |
+
compressed-sparse graph, with shape (N, N).
|
31 |
+
normed : bool, optional
|
32 |
+
If True, then compute symmetrically normalized Laplacian.
|
33 |
+
Default: False.
|
34 |
+
return_diag : bool, optional
|
35 |
+
If True, then also return an array related to vertex degrees.
|
36 |
+
Default: False.
|
37 |
+
use_out_degree : bool, optional
|
38 |
+
If True, then use out-degree instead of in-degree.
|
39 |
+
This distinction matters only if the graph is asymmetric.
|
40 |
+
Default: False.
|
41 |
+
copy: bool, optional
|
42 |
+
If False, then change `csgraph` in place if possible,
|
43 |
+
avoiding doubling the memory use.
|
44 |
+
Default: True, for backward compatibility.
|
45 |
+
form: 'array', or 'function', or 'lo'
|
46 |
+
Determines the format of the output Laplacian:
|
47 |
+
|
48 |
+
* 'array' is a numpy array;
|
49 |
+
* 'function' is a pointer to evaluating the Laplacian-vector
|
50 |
+
or Laplacian-matrix product;
|
51 |
+
* 'lo' results in the format of the `LinearOperator`.
|
52 |
+
|
53 |
+
Choosing 'function' or 'lo' always avoids doubling
|
54 |
+
the memory use, ignoring `copy` value.
|
55 |
+
Default: 'array', for backward compatibility.
|
56 |
+
dtype: None or one of numeric numpy dtypes, optional
|
57 |
+
The dtype of the output. If ``dtype=None``, the dtype of the
|
58 |
+
output matches the dtype of the input csgraph, except for
|
59 |
+
the case ``normed=True`` and integer-like csgraph, where
|
60 |
+
the output dtype is 'float' allowing accurate normalization,
|
61 |
+
but dramatically increasing the memory use.
|
62 |
+
Default: None, for backward compatibility.
|
63 |
+
symmetrized: bool, optional
|
64 |
+
If True, then the output Laplacian is symmetric/Hermitian.
|
65 |
+
The symmetrization is done by ``csgraph + csgraph.T.conj``
|
66 |
+
without dividing by 2 to preserve integer dtypes if possible
|
67 |
+
prior to the construction of the Laplacian.
|
68 |
+
The symmetrization will increase the memory footprint of
|
69 |
+
sparse matrices unless the sparsity pattern is symmetric or
|
70 |
+
`form` is 'function' or 'lo'.
|
71 |
+
Default: False, for backward compatibility.
|
72 |
+
|
73 |
+
Returns
|
74 |
+
-------
|
75 |
+
lap : ndarray, or sparse matrix, or `LinearOperator`
|
76 |
+
The N x N Laplacian of csgraph. It will be a NumPy array (dense)
|
77 |
+
if the input was dense, or a sparse matrix otherwise, or
|
78 |
+
the format of a function or `LinearOperator` if
|
79 |
+
`form` equals 'function' or 'lo', respectively.
|
80 |
+
diag : ndarray, optional
|
81 |
+
The length-N main diagonal of the Laplacian matrix.
|
82 |
+
For the normalized Laplacian, this is the array of square roots
|
83 |
+
of vertex degrees or 1 if the degree is zero.
|
84 |
+
|
85 |
+
Notes
|
86 |
+
-----
|
87 |
+
The Laplacian matrix of a graph is sometimes referred to as the
|
88 |
+
"Kirchhoff matrix" or just the "Laplacian", and is useful in many
|
89 |
+
parts of spectral graph theory.
|
90 |
+
In particular, the eigen-decomposition of the Laplacian can give
|
91 |
+
insight into many properties of the graph, e.g.,
|
92 |
+
is commonly used for spectral data embedding and clustering.
|
93 |
+
|
94 |
+
The constructed Laplacian doubles the memory use if ``copy=True`` and
|
95 |
+
``form="array"`` which is the default.
|
96 |
+
Choosing ``copy=False`` has no effect unless ``form="array"``
|
97 |
+
or the matrix is sparse in the ``coo`` format, or dense array, except
|
98 |
+
for the integer input with ``normed=True`` that forces the float output.
|
99 |
+
|
100 |
+
Sparse input is reformatted into ``coo`` if ``form="array"``,
|
101 |
+
which is the default.
|
102 |
+
|
103 |
+
If the input adjacency matrix is not symmetric, the Laplacian is
|
104 |
+
also non-symmetric unless ``symmetrized=True`` is used.
|
105 |
+
|
106 |
+
Diagonal entries of the input adjacency matrix are ignored and
|
107 |
+
replaced with zeros for the purpose of normalization where ``normed=True``.
|
108 |
+
The normalization uses the inverse square roots of row-sums of the input
|
109 |
+
adjacency matrix, and thus may fail if the row-sums contain
|
110 |
+
negative or complex with a non-zero imaginary part values.
|
111 |
+
|
112 |
+
The normalization is symmetric, making the normalized Laplacian also
|
113 |
+
symmetric if the input csgraph was symmetric.
|
114 |
+
|
115 |
+
References
|
116 |
+
----------
|
117 |
+
.. [1] Laplacian matrix. https://en.wikipedia.org/wiki/Laplacian_matrix
|
118 |
+
|
119 |
+
Examples
|
120 |
+
--------
|
121 |
+
>>> import numpy as np
|
122 |
+
>>> from scipy.sparse import csgraph
|
123 |
+
|
124 |
+
Our first illustration is the symmetric graph
|
125 |
+
|
126 |
+
>>> G = np.arange(4) * np.arange(4)[:, np.newaxis]
|
127 |
+
>>> G
|
128 |
+
array([[0, 0, 0, 0],
|
129 |
+
[0, 1, 2, 3],
|
130 |
+
[0, 2, 4, 6],
|
131 |
+
[0, 3, 6, 9]])
|
132 |
+
|
133 |
+
and its symmetric Laplacian matrix
|
134 |
+
|
135 |
+
>>> csgraph.laplacian(G)
|
136 |
+
array([[ 0, 0, 0, 0],
|
137 |
+
[ 0, 5, -2, -3],
|
138 |
+
[ 0, -2, 8, -6],
|
139 |
+
[ 0, -3, -6, 9]])
|
140 |
+
|
141 |
+
The non-symmetric graph
|
142 |
+
|
143 |
+
>>> G = np.arange(9).reshape(3, 3)
|
144 |
+
>>> G
|
145 |
+
array([[0, 1, 2],
|
146 |
+
[3, 4, 5],
|
147 |
+
[6, 7, 8]])
|
148 |
+
|
149 |
+
has different row- and column sums, resulting in two varieties
|
150 |
+
of the Laplacian matrix, using an in-degree, which is the default
|
151 |
+
|
152 |
+
>>> L_in_degree = csgraph.laplacian(G)
|
153 |
+
>>> L_in_degree
|
154 |
+
array([[ 9, -1, -2],
|
155 |
+
[-3, 8, -5],
|
156 |
+
[-6, -7, 7]])
|
157 |
+
|
158 |
+
or alternatively an out-degree
|
159 |
+
|
160 |
+
>>> L_out_degree = csgraph.laplacian(G, use_out_degree=True)
|
161 |
+
>>> L_out_degree
|
162 |
+
array([[ 3, -1, -2],
|
163 |
+
[-3, 8, -5],
|
164 |
+
[-6, -7, 13]])
|
165 |
+
|
166 |
+
Constructing a symmetric Laplacian matrix, one can add the two as
|
167 |
+
|
168 |
+
>>> L_in_degree + L_out_degree.T
|
169 |
+
array([[ 12, -4, -8],
|
170 |
+
[ -4, 16, -12],
|
171 |
+
[ -8, -12, 20]])
|
172 |
+
|
173 |
+
or use the ``symmetrized=True`` option
|
174 |
+
|
175 |
+
>>> csgraph.laplacian(G, symmetrized=True)
|
176 |
+
array([[ 12, -4, -8],
|
177 |
+
[ -4, 16, -12],
|
178 |
+
[ -8, -12, 20]])
|
179 |
+
|
180 |
+
that is equivalent to symmetrizing the original graph
|
181 |
+
|
182 |
+
>>> csgraph.laplacian(G + G.T)
|
183 |
+
array([[ 12, -4, -8],
|
184 |
+
[ -4, 16, -12],
|
185 |
+
[ -8, -12, 20]])
|
186 |
+
|
187 |
+
The goal of normalization is to make the non-zero diagonal entries
|
188 |
+
of the Laplacian matrix to be all unit, also scaling off-diagonal
|
189 |
+
entries correspondingly. The normalization can be done manually, e.g.,
|
190 |
+
|
191 |
+
>>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
|
192 |
+
>>> L, d = csgraph.laplacian(G, return_diag=True)
|
193 |
+
>>> L
|
194 |
+
array([[ 2, -1, -1],
|
195 |
+
[-1, 2, -1],
|
196 |
+
[-1, -1, 2]])
|
197 |
+
>>> d
|
198 |
+
array([2, 2, 2])
|
199 |
+
>>> scaling = np.sqrt(d)
|
200 |
+
>>> scaling
|
201 |
+
array([1.41421356, 1.41421356, 1.41421356])
|
202 |
+
>>> (1/scaling)*L*(1/scaling)
|
203 |
+
array([[ 1. , -0.5, -0.5],
|
204 |
+
[-0.5, 1. , -0.5],
|
205 |
+
[-0.5, -0.5, 1. ]])
|
206 |
+
|
207 |
+
Or using ``normed=True`` option
|
208 |
+
|
209 |
+
>>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
|
210 |
+
>>> L
|
211 |
+
array([[ 1. , -0.5, -0.5],
|
212 |
+
[-0.5, 1. , -0.5],
|
213 |
+
[-0.5, -0.5, 1. ]])
|
214 |
+
|
215 |
+
which now instead of the diagonal returns the scaling coefficients
|
216 |
+
|
217 |
+
>>> d
|
218 |
+
array([1.41421356, 1.41421356, 1.41421356])
|
219 |
+
|
220 |
+
Zero scaling coefficients are substituted with 1s, where scaling
|
221 |
+
has thus no effect, e.g.,
|
222 |
+
|
223 |
+
>>> G = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0]])
|
224 |
+
>>> G
|
225 |
+
array([[0, 0, 0],
|
226 |
+
[0, 0, 1],
|
227 |
+
[0, 1, 0]])
|
228 |
+
>>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
|
229 |
+
>>> L
|
230 |
+
array([[ 0., -0., -0.],
|
231 |
+
[-0., 1., -1.],
|
232 |
+
[-0., -1., 1.]])
|
233 |
+
>>> d
|
234 |
+
array([1., 1., 1.])
|
235 |
+
|
236 |
+
Only the symmetric normalization is implemented, resulting
|
237 |
+
in a symmetric Laplacian matrix if and only if its graph is symmetric
|
238 |
+
and has all non-negative degrees, like in the examples above.
|
239 |
+
|
240 |
+
The output Laplacian matrix is by default a dense array or a sparse matrix
|
241 |
+
inferring its shape, format, and dtype from the input graph matrix:
|
242 |
+
|
243 |
+
>>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]).astype(np.float32)
|
244 |
+
>>> G
|
245 |
+
array([[0., 1., 1.],
|
246 |
+
[1., 0., 1.],
|
247 |
+
[1., 1., 0.]], dtype=float32)
|
248 |
+
>>> csgraph.laplacian(G)
|
249 |
+
array([[ 2., -1., -1.],
|
250 |
+
[-1., 2., -1.],
|
251 |
+
[-1., -1., 2.]], dtype=float32)
|
252 |
+
|
253 |
+
but can alternatively be generated matrix-free as a LinearOperator:
|
254 |
+
|
255 |
+
>>> L = csgraph.laplacian(G, form="lo")
|
256 |
+
>>> L
|
257 |
+
<3x3 _CustomLinearOperator with dtype=float32>
|
258 |
+
>>> L(np.eye(3))
|
259 |
+
array([[ 2., -1., -1.],
|
260 |
+
[-1., 2., -1.],
|
261 |
+
[-1., -1., 2.]])
|
262 |
+
|
263 |
+
or as a lambda-function:
|
264 |
+
|
265 |
+
>>> L = csgraph.laplacian(G, form="function")
|
266 |
+
>>> L
|
267 |
+
<function _laplace.<locals>.<lambda> at 0x0000012AE6F5A598>
|
268 |
+
>>> L(np.eye(3))
|
269 |
+
array([[ 2., -1., -1.],
|
270 |
+
[-1., 2., -1.],
|
271 |
+
[-1., -1., 2.]])
|
272 |
+
|
273 |
+
The Laplacian matrix is used for
|
274 |
+
spectral data clustering and embedding
|
275 |
+
as well as for spectral graph partitioning.
|
276 |
+
Our final example illustrates the latter
|
277 |
+
for a noisy directed linear graph.
|
278 |
+
|
279 |
+
>>> from scipy.sparse import diags, random
|
280 |
+
>>> from scipy.sparse.linalg import lobpcg
|
281 |
+
|
282 |
+
Create a directed linear graph with ``N=35`` vertices
|
283 |
+
using a sparse adjacency matrix ``G``:
|
284 |
+
|
285 |
+
>>> N = 35
|
286 |
+
>>> G = diags(np.ones(N-1), 1, format="csr")
|
287 |
+
|
288 |
+
Fix a random seed ``rng`` and add a random sparse noise to the graph ``G``:
|
289 |
+
|
290 |
+
>>> rng = np.random.default_rng()
|
291 |
+
>>> G += 1e-2 * random(N, N, density=0.1, random_state=rng)
|
292 |
+
|
293 |
+
Set initial approximations for eigenvectors:
|
294 |
+
|
295 |
+
>>> X = rng.random((N, 2))
|
296 |
+
|
297 |
+
The constant vector of ones is always a trivial eigenvector
|
298 |
+
of the non-normalized Laplacian to be filtered out:
|
299 |
+
|
300 |
+
>>> Y = np.ones((N, 1))
|
301 |
+
|
302 |
+
Alternating (1) the sign of the graph weights allows determining
|
303 |
+
labels for spectral max- and min- cuts in a single loop.
|
304 |
+
Since the graph is undirected, the option ``symmetrized=True``
|
305 |
+
must be used in the construction of the Laplacian.
|
306 |
+
The option ``normed=True`` cannot be used in (2) for the negative weights
|
307 |
+
here as the symmetric normalization evaluates square roots.
|
308 |
+
The option ``form="lo"`` in (2) is matrix-free, i.e., guarantees
|
309 |
+
a fixed memory footprint and read-only access to the graph.
|
310 |
+
Calling the eigenvalue solver ``lobpcg`` (3) computes the Fiedler vector
|
311 |
+
that determines the labels as the signs of its components in (5).
|
312 |
+
Since the sign in an eigenvector is not deterministic and can flip,
|
313 |
+
we fix the sign of the first component to be always +1 in (4).
|
314 |
+
|
315 |
+
>>> for cut in ["max", "min"]:
|
316 |
+
... G = -G # 1.
|
317 |
+
... L = csgraph.laplacian(G, symmetrized=True, form="lo") # 2.
|
318 |
+
... _, eves = lobpcg(L, X, Y=Y, largest=False, tol=1e-3) # 3.
|
319 |
+
... eves *= np.sign(eves[0, 0]) # 4.
|
320 |
+
... print(cut + "-cut labels:\\n", 1 * (eves[:, 0]>0)) # 5.
|
321 |
+
max-cut labels:
|
322 |
+
[1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1]
|
323 |
+
min-cut labels:
|
324 |
+
[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
|
325 |
+
|
326 |
+
As anticipated for a (slightly noisy) linear graph,
|
327 |
+
the max-cut strips all the edges of the graph coloring all
|
328 |
+
odd vertices into one color and all even vertices into another one,
|
329 |
+
while the balanced min-cut partitions the graph
|
330 |
+
in the middle by deleting a single edge.
|
331 |
+
Both determined partitions are optimal.
|
332 |
+
"""
|
333 |
+
is_pydata_sparse = is_pydata_spmatrix(csgraph)
|
334 |
+
if is_pydata_sparse:
|
335 |
+
pydata_sparse_cls = csgraph.__class__
|
336 |
+
csgraph = convert_pydata_sparse_to_scipy(csgraph)
|
337 |
+
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
|
338 |
+
raise ValueError('csgraph must be a square matrix or array')
|
339 |
+
|
340 |
+
if normed and (
|
341 |
+
np.issubdtype(csgraph.dtype, np.signedinteger)
|
342 |
+
or np.issubdtype(csgraph.dtype, np.uint)
|
343 |
+
):
|
344 |
+
csgraph = csgraph.astype(np.float64)
|
345 |
+
|
346 |
+
if form == "array":
|
347 |
+
create_lap = (
|
348 |
+
_laplacian_sparse if issparse(csgraph) else _laplacian_dense
|
349 |
+
)
|
350 |
+
else:
|
351 |
+
create_lap = (
|
352 |
+
_laplacian_sparse_flo
|
353 |
+
if issparse(csgraph)
|
354 |
+
else _laplacian_dense_flo
|
355 |
+
)
|
356 |
+
|
357 |
+
degree_axis = 1 if use_out_degree else 0
|
358 |
+
|
359 |
+
lap, d = create_lap(
|
360 |
+
csgraph,
|
361 |
+
normed=normed,
|
362 |
+
axis=degree_axis,
|
363 |
+
copy=copy,
|
364 |
+
form=form,
|
365 |
+
dtype=dtype,
|
366 |
+
symmetrized=symmetrized,
|
367 |
+
)
|
368 |
+
if is_pydata_sparse:
|
369 |
+
lap = pydata_sparse_cls.from_scipy_sparse(lap)
|
370 |
+
if return_diag:
|
371 |
+
return lap, d
|
372 |
+
return lap
|
373 |
+
|
374 |
+
|
375 |
+
def _setdiag_dense(m, d):
|
376 |
+
step = len(d) + 1
|
377 |
+
m.flat[::step] = d
|
378 |
+
|
379 |
+
|
380 |
+
def _laplace(m, d):
|
381 |
+
return lambda v: v * d[:, np.newaxis] - m @ v
|
382 |
+
|
383 |
+
|
384 |
+
def _laplace_normed(m, d, nd):
|
385 |
+
laplace = _laplace(m, d)
|
386 |
+
return lambda v: nd[:, np.newaxis] * laplace(v * nd[:, np.newaxis])
|
387 |
+
|
388 |
+
|
389 |
+
def _laplace_sym(m, d):
|
390 |
+
return (
|
391 |
+
lambda v: v * d[:, np.newaxis]
|
392 |
+
- m @ v
|
393 |
+
- np.transpose(np.conjugate(np.transpose(np.conjugate(v)) @ m))
|
394 |
+
)
|
395 |
+
|
396 |
+
|
397 |
+
def _laplace_normed_sym(m, d, nd):
|
398 |
+
laplace_sym = _laplace_sym(m, d)
|
399 |
+
return lambda v: nd[:, np.newaxis] * laplace_sym(v * nd[:, np.newaxis])
|
400 |
+
|
401 |
+
|
402 |
+
def _linearoperator(mv, shape, dtype):
|
403 |
+
return LinearOperator(matvec=mv, matmat=mv, shape=shape, dtype=dtype)
|
404 |
+
|
405 |
+
|
406 |
+
def _laplacian_sparse_flo(graph, normed, axis, copy, form, dtype, symmetrized):
    """Build the Laplacian of a sparse graph as a callable or LinearOperator.

    Returns ``(laplacian, diagonal)``: the Laplacian is a matvec callable
    when ``form == "function"`` and a ``LinearOperator`` when
    ``form == "lo"``; any other ``form`` raises ``ValueError``.
    The keyword argument ``copy`` is unused and has no effect here.
    """
    del copy

    if dtype is None:
        dtype = graph.dtype

    graph_sum = np.asarray(graph.sum(axis=axis)).ravel()
    graph_diagonal = graph.diagonal()
    diag = graph_sum - graph_diagonal
    if symmetrized:
        # Degrees of the symmetrized graph combine both edge directions,
        # and the self-loop weight is removed once per direction.
        graph_sum += np.asarray(graph.sum(axis=1 - axis)).ravel()
        diag = graph_sum - graph_diagonal - graph_diagonal

    def _package(matvec, d):
        # Single place dispatching on ``form`` (previously this dispatch was
        # duplicated verbatim in the normed and the plain branch).
        if form == "function":
            return matvec, d.astype(dtype, copy=False)
        elif form == "lo":
            lo = _linearoperator(matvec, shape=graph.shape, dtype=dtype)
            return lo, d.astype(dtype, copy=False)
        else:
            raise ValueError(f"Invalid form: {form!r}")

    if normed:
        isolated_node_mask = diag == 0
        # Unit weight on isolated vertices keeps ``1 / w`` finite.
        w = np.where(isolated_node_mask, 1, np.sqrt(diag))
        if symmetrized:
            md = _laplace_normed_sym(graph, graph_sum, 1.0 / w)
        else:
            md = _laplace_normed(graph, graph_sum, 1.0 / w)
        return _package(md, w)
    if symmetrized:
        md = _laplace_sym(graph, graph_sum)
    else:
        md = _laplace(graph, graph_sum)
    return _package(md, diag)
|
446 |
+
|
447 |
+
|
448 |
+
def _laplacian_sparse(graph, normed, axis, copy, form, dtype, symmetrized):
|
449 |
+
# The keyword argument `form` is unused and has no effect here.
|
450 |
+
del form
|
451 |
+
|
452 |
+
if dtype is None:
|
453 |
+
dtype = graph.dtype
|
454 |
+
|
455 |
+
needs_copy = False
|
456 |
+
if graph.format in ('lil', 'dok'):
|
457 |
+
m = graph.tocoo()
|
458 |
+
else:
|
459 |
+
m = graph
|
460 |
+
if copy:
|
461 |
+
needs_copy = True
|
462 |
+
|
463 |
+
if symmetrized:
|
464 |
+
m += m.T.conj()
|
465 |
+
|
466 |
+
w = np.asarray(m.sum(axis=axis)).ravel() - m.diagonal()
|
467 |
+
if normed:
|
468 |
+
m = m.tocoo(copy=needs_copy)
|
469 |
+
isolated_node_mask = (w == 0)
|
470 |
+
w = np.where(isolated_node_mask, 1, np.sqrt(w))
|
471 |
+
m.data /= w[m.row]
|
472 |
+
m.data /= w[m.col]
|
473 |
+
m.data *= -1
|
474 |
+
m.setdiag(1 - isolated_node_mask)
|
475 |
+
else:
|
476 |
+
if m.format == 'dia':
|
477 |
+
m = m.copy()
|
478 |
+
else:
|
479 |
+
m = m.tocoo(copy=needs_copy)
|
480 |
+
m.data *= -1
|
481 |
+
m.setdiag(w)
|
482 |
+
|
483 |
+
return m.astype(dtype, copy=False), w.astype(dtype)
|
484 |
+
|
485 |
+
|
486 |
+
def _laplacian_dense_flo(graph, normed, axis, copy, form, dtype, symmetrized):
    """Build the Laplacian of a dense graph as a callable or LinearOperator.

    Returns ``(laplacian, diagonal)``: the Laplacian is a matvec callable
    when ``form == "function"`` and a ``LinearOperator`` when
    ``form == "lo"``; any other ``form`` raises ``ValueError``.
    """
    if copy:
        m = np.array(graph)
    else:
        m = np.asarray(graph)

    if dtype is None:
        dtype = m.dtype

    graph_sum = m.sum(axis=axis)
    graph_diagonal = m.diagonal()
    diag = graph_sum - graph_diagonal
    if symmetrized:
        # Degrees of the symmetrized graph combine both edge directions,
        # and the self-loop weight is removed once per direction.
        graph_sum += m.sum(axis=1 - axis)
        diag = graph_sum - graph_diagonal - graph_diagonal

    def _package(matvec, d):
        # Single place dispatching on ``form`` (previously this dispatch was
        # duplicated verbatim in the normed and the plain branch).
        if form == "function":
            return matvec, d.astype(dtype, copy=False)
        elif form == "lo":
            lo = _linearoperator(matvec, shape=graph.shape, dtype=dtype)
            return lo, d.astype(dtype, copy=False)
        else:
            raise ValueError(f"Invalid form: {form!r}")

    if normed:
        isolated_node_mask = diag == 0
        # Unit weight on isolated vertices keeps ``1 / w`` finite.
        w = np.where(isolated_node_mask, 1, np.sqrt(diag))
        if symmetrized:
            md = _laplace_normed_sym(m, graph_sum, 1.0 / w)
        else:
            md = _laplace_normed(m, graph_sum, 1.0 / w)
        return _package(md, w)
    if symmetrized:
        md = _laplace_sym(m, graph_sum)
    else:
        md = _laplace(m, graph_sum)
    return _package(md, diag)
|
529 |
+
|
530 |
+
|
531 |
+
def _laplacian_dense(graph, normed, axis, copy, form, dtype, symmetrized):
|
532 |
+
|
533 |
+
if form != "array":
|
534 |
+
raise ValueError(f'{form!r} must be "array"')
|
535 |
+
|
536 |
+
if dtype is None:
|
537 |
+
dtype = graph.dtype
|
538 |
+
|
539 |
+
if copy:
|
540 |
+
m = np.array(graph)
|
541 |
+
else:
|
542 |
+
m = np.asarray(graph)
|
543 |
+
|
544 |
+
if dtype is None:
|
545 |
+
dtype = m.dtype
|
546 |
+
|
547 |
+
if symmetrized:
|
548 |
+
m += m.T.conj()
|
549 |
+
np.fill_diagonal(m, 0)
|
550 |
+
w = m.sum(axis=axis)
|
551 |
+
if normed:
|
552 |
+
isolated_node_mask = (w == 0)
|
553 |
+
w = np.where(isolated_node_mask, 1, np.sqrt(w))
|
554 |
+
m /= w
|
555 |
+
m /= w[:, np.newaxis]
|
556 |
+
m *= -1
|
557 |
+
_setdiag_dense(m, 1 - isolated_node_mask)
|
558 |
+
else:
|
559 |
+
m *= -1
|
560 |
+
_setdiag_dense(m, w)
|
561 |
+
|
562 |
+
return m.astype(dtype, copy=False), w.astype(dtype, copy=False)
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_matching.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (348 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_min_spanning_tree.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (259 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_reordering.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (332 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_shortest_path.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (485 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_tools.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (205 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_traversal.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (659 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/_validation.py
ADDED
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from scipy.sparse import csr_matrix, issparse
|
3 |
+
from scipy.sparse._sputils import convert_pydata_sparse_to_scipy
|
4 |
+
from scipy.sparse.csgraph._tools import (
|
5 |
+
csgraph_to_dense, csgraph_from_dense,
|
6 |
+
csgraph_masked_from_dense, csgraph_from_masked
|
7 |
+
)
|
8 |
+
|
9 |
+
DTYPE = np.float64
|
10 |
+
|
11 |
+
|
12 |
+
def validate_graph(csgraph, directed, dtype=DTYPE,
|
13 |
+
csr_output=True, dense_output=True,
|
14 |
+
copy_if_dense=False, copy_if_sparse=False,
|
15 |
+
null_value_in=0, null_value_out=np.inf,
|
16 |
+
infinity_null=True, nan_null=True):
|
17 |
+
"""Routine for validation and conversion of csgraph inputs"""
|
18 |
+
if not (csr_output or dense_output):
|
19 |
+
raise ValueError("Internal: dense or csr output must be true")
|
20 |
+
|
21 |
+
csgraph = convert_pydata_sparse_to_scipy(csgraph)
|
22 |
+
|
23 |
+
# if undirected and csc storage, then transposing in-place
|
24 |
+
# is quicker than later converting to csr.
|
25 |
+
if (not directed) and issparse(csgraph) and csgraph.format == "csc":
|
26 |
+
csgraph = csgraph.T
|
27 |
+
|
28 |
+
if issparse(csgraph):
|
29 |
+
if csr_output:
|
30 |
+
csgraph = csr_matrix(csgraph, dtype=DTYPE, copy=copy_if_sparse)
|
31 |
+
else:
|
32 |
+
csgraph = csgraph_to_dense(csgraph, null_value=null_value_out)
|
33 |
+
elif np.ma.isMaskedArray(csgraph):
|
34 |
+
if dense_output:
|
35 |
+
mask = csgraph.mask
|
36 |
+
csgraph = np.array(csgraph.data, dtype=DTYPE, copy=copy_if_dense)
|
37 |
+
csgraph[mask] = null_value_out
|
38 |
+
else:
|
39 |
+
csgraph = csgraph_from_masked(csgraph)
|
40 |
+
else:
|
41 |
+
if dense_output:
|
42 |
+
csgraph = csgraph_masked_from_dense(csgraph,
|
43 |
+
copy=copy_if_dense,
|
44 |
+
null_value=null_value_in,
|
45 |
+
nan_null=nan_null,
|
46 |
+
infinity_null=infinity_null)
|
47 |
+
mask = csgraph.mask
|
48 |
+
csgraph = np.asarray(csgraph.data, dtype=DTYPE)
|
49 |
+
csgraph[mask] = null_value_out
|
50 |
+
else:
|
51 |
+
csgraph = csgraph_from_dense(csgraph, null_value=null_value_in,
|
52 |
+
infinity_null=infinity_null,
|
53 |
+
nan_null=nan_null)
|
54 |
+
|
55 |
+
if csgraph.ndim != 2:
|
56 |
+
raise ValueError("compressed-sparse graph must be 2-D")
|
57 |
+
|
58 |
+
if csgraph.shape[0] != csgraph.shape[1]:
|
59 |
+
raise ValueError("compressed-sparse graph must be shape (N, N)")
|
60 |
+
|
61 |
+
return csgraph
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__init__.py
ADDED
File without changes
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (194 Bytes). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_connected_components.cpython-310.pyc
ADDED
Binary file (3.23 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_conversions.cpython-310.pyc
ADDED
Binary file (1.74 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_flow.cpython-310.pyc
ADDED
Binary file (7 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_graph_laplacian.cpython-310.pyc
ADDED
Binary file (7.72 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_matching.cpython-310.pyc
ADDED
Binary file (10.4 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_pydata_sparse.cpython-310.pyc
ADDED
Binary file (3.62 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_reordering.cpython-310.pyc
ADDED
Binary file (2.94 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_shortest_path.cpython-310.pyc
ADDED
Binary file (12 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_spanning_tree.cpython-310.pyc
ADDED
Binary file (1.54 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_traversal.cpython-310.pyc
ADDED
Binary file (2.28 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_connected_components.py
ADDED
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from numpy.testing import assert_equal, assert_array_almost_equal
|
3 |
+
from scipy.sparse import csgraph, csr_array
|
4 |
+
|
5 |
+
|
6 |
+
def test_weak_connections():
    """Weakly-connected components of a small directed graph, for both the
    sparse and the dense representation."""
    dense = np.array([[0, 1, 0],
                      [0, 0, 0],
                      [0, 0, 0]])

    sparse_graph = csgraph.csgraph_from_dense(dense, null_value=0)

    for graph in (sparse_graph, dense):
        n_components, labels = csgraph.connected_components(
            graph, directed=True, connection='weak')

        assert_equal(n_components, 2)
        assert_array_almost_equal(labels, [0, 0, 1])
|
20 |
+
|
21 |
+
|
22 |
+
def test_strong_connections():
|
23 |
+
X1de = np.array([[0, 1, 0],
|
24 |
+
[0, 0, 0],
|
25 |
+
[0, 0, 0]])
|
26 |
+
X2de = X1de + X1de.T
|
27 |
+
|
28 |
+
X1sp = csgraph.csgraph_from_dense(X1de, null_value=0)
|
29 |
+
X2sp = csgraph.csgraph_from_dense(X2de, null_value=0)
|
30 |
+
|
31 |
+
for X in X1sp, X1de:
|
32 |
+
n_components, labels =\
|
33 |
+
csgraph.connected_components(X, directed=True,
|
34 |
+
connection='strong')
|
35 |
+
|
36 |
+
assert_equal(n_components, 3)
|
37 |
+
labels.sort()
|
38 |
+
assert_array_almost_equal(labels, [0, 1, 2])
|
39 |
+
|
40 |
+
for X in X2sp, X2de:
|
41 |
+
n_components, labels =\
|
42 |
+
csgraph.connected_components(X, directed=True,
|
43 |
+
connection='strong')
|
44 |
+
|
45 |
+
assert_equal(n_components, 2)
|
46 |
+
labels.sort()
|
47 |
+
assert_array_almost_equal(labels, [0, 0, 1])
|
48 |
+
|
49 |
+
|
50 |
+
def test_strong_connections2():
|
51 |
+
X = np.array([[0, 0, 0, 0, 0, 0],
|
52 |
+
[1, 0, 1, 0, 0, 0],
|
53 |
+
[0, 0, 0, 1, 0, 0],
|
54 |
+
[0, 0, 1, 0, 1, 0],
|
55 |
+
[0, 0, 0, 0, 0, 0],
|
56 |
+
[0, 0, 0, 0, 1, 0]])
|
57 |
+
n_components, labels =\
|
58 |
+
csgraph.connected_components(X, directed=True,
|
59 |
+
connection='strong')
|
60 |
+
assert_equal(n_components, 5)
|
61 |
+
labels.sort()
|
62 |
+
assert_array_almost_equal(labels, [0, 1, 2, 2, 3, 4])
|
63 |
+
|
64 |
+
|
65 |
+
def test_weak_connections2():
|
66 |
+
X = np.array([[0, 0, 0, 0, 0, 0],
|
67 |
+
[1, 0, 0, 0, 0, 0],
|
68 |
+
[0, 0, 0, 1, 0, 0],
|
69 |
+
[0, 0, 1, 0, 1, 0],
|
70 |
+
[0, 0, 0, 0, 0, 0],
|
71 |
+
[0, 0, 0, 0, 1, 0]])
|
72 |
+
n_components, labels =\
|
73 |
+
csgraph.connected_components(X, directed=True,
|
74 |
+
connection='weak')
|
75 |
+
assert_equal(n_components, 2)
|
76 |
+
labels.sort()
|
77 |
+
assert_array_almost_equal(labels, [0, 0, 1, 1, 1, 1])
|
78 |
+
|
79 |
+
|
80 |
+
def test_ticket1876():
    """Regression test for scipy ticket 1876: the graph below has two
    strongly-connected components, but the original implementation
    reported only one."""
    adjacency = np.array([[0, 1, 1, 0],
                          [1, 0, 0, 1],
                          [0, 0, 0, 1],
                          [0, 0, 1, 0]])
    n_components, labels = csgraph.connected_components(
        adjacency, connection='strong')

    assert_equal(n_components, 2)
    assert_equal(labels[0], labels[1])
    assert_equal(labels[2], labels[3])
|
92 |
+
|
93 |
+
|
94 |
+
def test_fully_connected_graph():
    """Fully connected dense matrices used to raise an exception.

    See https://github.com/scipy/scipy/issues/3818.
    """
    dense = np.ones((4, 4))
    n_components, _ = csgraph.connected_components(dense)
    assert_equal(n_components, 1)
|
100 |
+
|
101 |
+
|
102 |
+
def test_int64_indices_undirected():
|
103 |
+
# See https://github.com/scipy/scipy/issues/18716
|
104 |
+
g = csr_array(([1], np.array([[0], [1]], dtype=np.int64)), shape=(2, 2))
|
105 |
+
assert g.indices.dtype == np.int64
|
106 |
+
n, labels = csgraph.connected_components(g, directed=False)
|
107 |
+
assert n == 1
|
108 |
+
assert_array_almost_equal(labels, [0, 0])
|
109 |
+
|
110 |
+
|
111 |
+
def test_int64_indices_directed():
|
112 |
+
# See https://github.com/scipy/scipy/issues/18716
|
113 |
+
g = csr_array(([1], np.array([[0], [1]], dtype=np.int64)), shape=(2, 2))
|
114 |
+
assert g.indices.dtype == np.int64
|
115 |
+
n, labels = csgraph.connected_components(g, directed=True,
|
116 |
+
connection='strong')
|
117 |
+
assert n == 2
|
118 |
+
assert_array_almost_equal(labels, [1, 0])
|
119 |
+
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_conversions.py
ADDED
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from numpy.testing import assert_array_almost_equal
|
3 |
+
from scipy.sparse import csr_matrix
|
4 |
+
from scipy.sparse.csgraph import csgraph_from_dense, csgraph_to_dense
|
5 |
+
|
6 |
+
|
7 |
+
def test_csgraph_from_dense():
|
8 |
+
np.random.seed(1234)
|
9 |
+
G = np.random.random((10, 10))
|
10 |
+
some_nulls = (G < 0.4)
|
11 |
+
all_nulls = (G < 0.8)
|
12 |
+
|
13 |
+
for null_value in [0, np.nan, np.inf]:
|
14 |
+
G[all_nulls] = null_value
|
15 |
+
with np.errstate(invalid="ignore"):
|
16 |
+
G_csr = csgraph_from_dense(G, null_value=0)
|
17 |
+
|
18 |
+
G[all_nulls] = 0
|
19 |
+
assert_array_almost_equal(G, G_csr.toarray())
|
20 |
+
|
21 |
+
for null_value in [np.nan, np.inf]:
|
22 |
+
G[all_nulls] = 0
|
23 |
+
G[some_nulls] = null_value
|
24 |
+
with np.errstate(invalid="ignore"):
|
25 |
+
G_csr = csgraph_from_dense(G, null_value=0)
|
26 |
+
|
27 |
+
G[all_nulls] = 0
|
28 |
+
assert_array_almost_equal(G, G_csr.toarray())
|
29 |
+
|
30 |
+
|
31 |
+
def test_csgraph_to_dense():
    """Round-trip a random graph through csgraph_to_dense, checking several
    replacement values for the null entries."""
    np.random.seed(1234)
    dense = np.random.random((10, 10))
    null_mask = (dense < 0.8)
    dense[null_mask] = np.inf

    compressed = csgraph_from_dense(dense)

    for fill in (0, 10, -np.inf, np.inf):
        dense[null_mask] = fill
        assert_array_almost_equal(dense, csgraph_to_dense(compressed, fill))
|
42 |
+
|
43 |
+
|
44 |
+
def test_multiple_edges():
|
45 |
+
# create a random square matrix with an even number of elements
|
46 |
+
np.random.seed(1234)
|
47 |
+
X = np.random.random((10, 10))
|
48 |
+
Xcsr = csr_matrix(X)
|
49 |
+
|
50 |
+
# now double-up every other column
|
51 |
+
Xcsr.indices[::2] = Xcsr.indices[1::2]
|
52 |
+
|
53 |
+
# normal sparse toarray() will sum the duplicated edges
|
54 |
+
Xdense = Xcsr.toarray()
|
55 |
+
assert_array_almost_equal(Xdense[:, 1::2],
|
56 |
+
X[:, ::2] + X[:, 1::2])
|
57 |
+
|
58 |
+
# csgraph_to_dense chooses the minimum of each duplicated edge
|
59 |
+
Xdense = csgraph_to_dense(Xcsr)
|
60 |
+
assert_array_almost_equal(Xdense[:, 1::2],
|
61 |
+
np.minimum(X[:, ::2], X[:, 1::2]))
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_flow.py
ADDED
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from numpy.testing import assert_array_equal
|
3 |
+
import pytest
|
4 |
+
|
5 |
+
from scipy.sparse import csr_matrix, csc_matrix
|
6 |
+
from scipy.sparse.csgraph import maximum_flow
|
7 |
+
from scipy.sparse.csgraph._flow import (
|
8 |
+
_add_reverse_edges, _make_edge_pointers, _make_tails
|
9 |
+
)
|
10 |
+
|
11 |
+
methods = ['edmonds_karp', 'dinic']
|
12 |
+
|
13 |
+
def test_raises_on_dense_input():
    """maximum_flow must reject dense (non-sparse) input for every method.

    Each call gets its own ``pytest.raises`` block: in the original, the
    second call sat after the first inside one block and was unreachable,
    so the 'edmonds_karp' variant was never actually exercised.
    """
    graph = np.array([[0, 1], [0, 0]])
    with pytest.raises(TypeError):
        maximum_flow(graph, 0, 1)
    with pytest.raises(TypeError):
        maximum_flow(graph, 0, 1, method='edmonds_karp')
|
18 |
+
|
19 |
+
|
20 |
+
def test_raises_on_csc_input():
|
21 |
+
with pytest.raises(TypeError):
|
22 |
+
graph = csc_matrix([[0, 1], [0, 0]])
|
23 |
+
maximum_flow(graph, 0, 1)
|
24 |
+
maximum_flow(graph, 0, 1, method='edmonds_karp')
|
25 |
+
|
26 |
+
|
27 |
+
def test_raises_on_floating_point_input():
|
28 |
+
with pytest.raises(ValueError):
|
29 |
+
graph = csr_matrix([[0, 1.5], [0, 0]], dtype=np.float64)
|
30 |
+
maximum_flow(graph, 0, 1)
|
31 |
+
maximum_flow(graph, 0, 1, method='edmonds_karp')
|
32 |
+
|
33 |
+
|
34 |
+
def test_raises_on_non_square_input():
|
35 |
+
with pytest.raises(ValueError):
|
36 |
+
graph = csr_matrix([[0, 1, 2], [2, 1, 0]])
|
37 |
+
maximum_flow(graph, 0, 1)
|
38 |
+
|
39 |
+
|
40 |
+
def test_raises_when_source_is_sink():
    """maximum_flow must reject source == sink for every method.

    Each call gets its own ``pytest.raises`` block: in the original, the
    second call sat after the first inside one block and was unreachable,
    so the 'edmonds_karp' variant was never actually exercised.
    """
    graph = csr_matrix([[0, 1], [0, 0]])
    with pytest.raises(ValueError):
        maximum_flow(graph, 0, 0)
    with pytest.raises(ValueError):
        maximum_flow(graph, 0, 0, method='edmonds_karp')
|
45 |
+
|
46 |
+
|
47 |
+
@pytest.mark.parametrize('method', methods)
|
48 |
+
@pytest.mark.parametrize('source', [-1, 2, 3])
|
49 |
+
def test_raises_when_source_is_out_of_bounds(source, method):
|
50 |
+
with pytest.raises(ValueError):
|
51 |
+
graph = csr_matrix([[0, 1], [0, 0]])
|
52 |
+
maximum_flow(graph, source, 1, method=method)
|
53 |
+
|
54 |
+
|
55 |
+
@pytest.mark.parametrize('method', methods)
|
56 |
+
@pytest.mark.parametrize('sink', [-1, 2, 3])
|
57 |
+
def test_raises_when_sink_is_out_of_bounds(sink, method):
|
58 |
+
with pytest.raises(ValueError):
|
59 |
+
graph = csr_matrix([[0, 1], [0, 0]])
|
60 |
+
maximum_flow(graph, 0, sink, method=method)
|
61 |
+
|
62 |
+
|
63 |
+
@pytest.mark.parametrize('method', methods)
|
64 |
+
def test_simple_graph(method):
|
65 |
+
# This graph looks as follows:
|
66 |
+
# (0) --5--> (1)
|
67 |
+
graph = csr_matrix([[0, 5], [0, 0]])
|
68 |
+
res = maximum_flow(graph, 0, 1, method=method)
|
69 |
+
assert res.flow_value == 5
|
70 |
+
expected_flow = np.array([[0, 5], [-5, 0]])
|
71 |
+
assert_array_equal(res.flow.toarray(), expected_flow)
|
72 |
+
|
73 |
+
|
74 |
+
@pytest.mark.parametrize('method', methods)
|
75 |
+
def test_bottle_neck_graph(method):
|
76 |
+
# This graph cannot use the full capacity between 0 and 1:
|
77 |
+
# (0) --5--> (1) --3--> (2)
|
78 |
+
graph = csr_matrix([[0, 5, 0], [0, 0, 3], [0, 0, 0]])
|
79 |
+
res = maximum_flow(graph, 0, 2, method=method)
|
80 |
+
assert res.flow_value == 3
|
81 |
+
expected_flow = np.array([[0, 3, 0], [-3, 0, 3], [0, -3, 0]])
|
82 |
+
assert_array_equal(res.flow.toarray(), expected_flow)
|
83 |
+
|
84 |
+
|
85 |
+
@pytest.mark.parametrize('method', methods)
|
86 |
+
def test_backwards_flow(method):
|
87 |
+
# This example causes backwards flow between vertices 3 and 4,
|
88 |
+
# and so this test ensures that we handle that accordingly. See
|
89 |
+
# https://stackoverflow.com/q/38843963/5085211
|
90 |
+
# for more information.
|
91 |
+
graph = csr_matrix([[0, 10, 0, 0, 10, 0, 0, 0],
|
92 |
+
[0, 0, 10, 0, 0, 0, 0, 0],
|
93 |
+
[0, 0, 0, 10, 0, 0, 0, 0],
|
94 |
+
[0, 0, 0, 0, 0, 0, 0, 10],
|
95 |
+
[0, 0, 0, 10, 0, 10, 0, 0],
|
96 |
+
[0, 0, 0, 0, 0, 0, 10, 0],
|
97 |
+
[0, 0, 0, 0, 0, 0, 0, 10],
|
98 |
+
[0, 0, 0, 0, 0, 0, 0, 0]])
|
99 |
+
res = maximum_flow(graph, 0, 7, method=method)
|
100 |
+
assert res.flow_value == 20
|
101 |
+
expected_flow = np.array([[0, 10, 0, 0, 10, 0, 0, 0],
|
102 |
+
[-10, 0, 10, 0, 0, 0, 0, 0],
|
103 |
+
[0, -10, 0, 10, 0, 0, 0, 0],
|
104 |
+
[0, 0, -10, 0, 0, 0, 0, 10],
|
105 |
+
[-10, 0, 0, 0, 0, 10, 0, 0],
|
106 |
+
[0, 0, 0, 0, -10, 0, 10, 0],
|
107 |
+
[0, 0, 0, 0, 0, -10, 0, 10],
|
108 |
+
[0, 0, 0, -10, 0, 0, -10, 0]])
|
109 |
+
assert_array_equal(res.flow.toarray(), expected_flow)
|
110 |
+
|
111 |
+
|
112 |
+
@pytest.mark.parametrize('method', methods)
|
113 |
+
def test_example_from_clrs_chapter_26_1(method):
|
114 |
+
# See page 659 in CLRS second edition, but note that the maximum flow
|
115 |
+
# we find is slightly different than the one in CLRS; we push a flow of
|
116 |
+
# 12 to v_1 instead of v_2.
|
117 |
+
graph = csr_matrix([[0, 16, 13, 0, 0, 0],
|
118 |
+
[0, 0, 10, 12, 0, 0],
|
119 |
+
[0, 4, 0, 0, 14, 0],
|
120 |
+
[0, 0, 9, 0, 0, 20],
|
121 |
+
[0, 0, 0, 7, 0, 4],
|
122 |
+
[0, 0, 0, 0, 0, 0]])
|
123 |
+
res = maximum_flow(graph, 0, 5, method=method)
|
124 |
+
assert res.flow_value == 23
|
125 |
+
expected_flow = np.array([[0, 12, 11, 0, 0, 0],
|
126 |
+
[-12, 0, 0, 12, 0, 0],
|
127 |
+
[-11, 0, 0, 0, 11, 0],
|
128 |
+
[0, -12, 0, 0, -7, 19],
|
129 |
+
[0, 0, -11, 7, 0, 4],
|
130 |
+
[0, 0, 0, -19, -4, 0]])
|
131 |
+
assert_array_equal(res.flow.toarray(), expected_flow)
|
132 |
+
|
133 |
+
|
134 |
+
@pytest.mark.parametrize('method', methods)
|
135 |
+
def test_disconnected_graph(method):
|
136 |
+
# This tests the following disconnected graph:
|
137 |
+
# (0) --5--> (1) (2) --3--> (3)
|
138 |
+
graph = csr_matrix([[0, 5, 0, 0],
|
139 |
+
[0, 0, 0, 0],
|
140 |
+
[0, 0, 9, 3],
|
141 |
+
[0, 0, 0, 0]])
|
142 |
+
res = maximum_flow(graph, 0, 3, method=method)
|
143 |
+
assert res.flow_value == 0
|
144 |
+
expected_flow = np.zeros((4, 4), dtype=np.int32)
|
145 |
+
assert_array_equal(res.flow.toarray(), expected_flow)
|
146 |
+
|
147 |
+
|
148 |
+
@pytest.mark.parametrize('method', methods)
|
149 |
+
def test_add_reverse_edges_large_graph(method):
|
150 |
+
# Regression test for https://github.com/scipy/scipy/issues/14385
|
151 |
+
n = 100_000
|
152 |
+
indices = np.arange(1, n)
|
153 |
+
indptr = np.array(list(range(n)) + [n - 1])
|
154 |
+
data = np.ones(n - 1, dtype=np.int32)
|
155 |
+
graph = csr_matrix((data, indices, indptr), shape=(n, n))
|
156 |
+
res = maximum_flow(graph, 0, n - 1, method=method)
|
157 |
+
assert res.flow_value == 1
|
158 |
+
expected_flow = graph - graph.transpose()
|
159 |
+
assert_array_equal(res.flow.data, expected_flow.data)
|
160 |
+
assert_array_equal(res.flow.indices, expected_flow.indices)
|
161 |
+
assert_array_equal(res.flow.indptr, expected_flow.indptr)
|
162 |
+
|
163 |
+
|
164 |
+
@pytest.mark.parametrize("a,b_data_expected", [
|
165 |
+
([[]], []),
|
166 |
+
([[0], [0]], []),
|
167 |
+
([[1, 0, 2], [0, 0, 0], [0, 3, 0]], [1, 2, 0, 0, 3]),
|
168 |
+
([[9, 8, 7], [4, 5, 6], [0, 0, 0]], [9, 8, 7, 4, 5, 6, 0, 0])])
|
169 |
+
def test_add_reverse_edges(a, b_data_expected):
|
170 |
+
"""Test that the reversal of the edges of the input graph works
|
171 |
+
as expected.
|
172 |
+
"""
|
173 |
+
a = csr_matrix(a, dtype=np.int32, shape=(len(a), len(a)))
|
174 |
+
b = _add_reverse_edges(a)
|
175 |
+
assert_array_equal(b.data, b_data_expected)
|
176 |
+
|
177 |
+
|
178 |
+
@pytest.mark.parametrize("a,expected", [
|
179 |
+
([[]], []),
|
180 |
+
([[0]], []),
|
181 |
+
([[1]], [0]),
|
182 |
+
([[0, 1], [10, 0]], [1, 0]),
|
183 |
+
([[1, 0, 2], [0, 0, 3], [4, 5, 0]], [0, 3, 4, 1, 2])
|
184 |
+
])
|
185 |
+
def test_make_edge_pointers(a, expected):
|
186 |
+
a = csr_matrix(a, dtype=np.int32)
|
187 |
+
rev_edge_ptr = _make_edge_pointers(a)
|
188 |
+
assert_array_equal(rev_edge_ptr, expected)
|
189 |
+
|
190 |
+
|
191 |
+
@pytest.mark.parametrize("a,expected", [
|
192 |
+
([[]], []),
|
193 |
+
([[0]], []),
|
194 |
+
([[1]], [0]),
|
195 |
+
([[0, 1], [10, 0]], [0, 1]),
|
196 |
+
([[1, 0, 2], [0, 0, 3], [4, 5, 0]], [0, 0, 1, 2, 2])
|
197 |
+
])
|
198 |
+
def test_make_tails(a, expected):
|
199 |
+
a = csr_matrix(a, dtype=np.int32)
|
200 |
+
tails = _make_tails(a)
|
201 |
+
assert_array_equal(tails, expected)
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py
ADDED
@@ -0,0 +1,369 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
import numpy as np
|
3 |
+
from numpy.testing import assert_allclose
|
4 |
+
from pytest import raises as assert_raises
|
5 |
+
from scipy import sparse
|
6 |
+
|
7 |
+
from scipy.sparse import csgraph
|
8 |
+
from scipy._lib._util import np_long, np_ulong
|
9 |
+
|
10 |
+
|
11 |
+
def check_int_type(mat):
|
12 |
+
return np.issubdtype(mat.dtype, np.signedinteger) or np.issubdtype(
|
13 |
+
mat.dtype, np_ulong
|
14 |
+
)
|
15 |
+
|
16 |
+
|
17 |
+
def test_laplacian_value_error():
|
18 |
+
for t in int, float, complex:
|
19 |
+
for m in ([1, 1],
|
20 |
+
[[[1]]],
|
21 |
+
[[1, 2, 3], [4, 5, 6]],
|
22 |
+
[[1, 2], [3, 4], [5, 5]]):
|
23 |
+
A = np.array(m, dtype=t)
|
24 |
+
assert_raises(ValueError, csgraph.laplacian, A)
|
25 |
+
|
26 |
+
|
27 |
+
def _explicit_laplacian(x, normed=False):
|
28 |
+
if sparse.issparse(x):
|
29 |
+
x = x.toarray()
|
30 |
+
x = np.asarray(x)
|
31 |
+
y = -1.0 * x
|
32 |
+
for j in range(y.shape[0]):
|
33 |
+
y[j,j] = x[j,j+1:].sum() + x[j,:j].sum()
|
34 |
+
if normed:
|
35 |
+
d = np.diag(y).copy()
|
36 |
+
d[d == 0] = 1.0
|
37 |
+
y /= d[:,None]**.5
|
38 |
+
y /= d[None,:]**.5
|
39 |
+
return y
|
40 |
+
|
41 |
+
|
42 |
+
def _check_symmetric_graph_laplacian(mat, normed, copy=True):
|
43 |
+
if not hasattr(mat, 'shape'):
|
44 |
+
mat = eval(mat, dict(np=np, sparse=sparse))
|
45 |
+
|
46 |
+
if sparse.issparse(mat):
|
47 |
+
sp_mat = mat
|
48 |
+
mat = sp_mat.toarray()
|
49 |
+
else:
|
50 |
+
sp_mat = sparse.csr_matrix(mat)
|
51 |
+
|
52 |
+
mat_copy = np.copy(mat)
|
53 |
+
sp_mat_copy = sparse.csr_matrix(sp_mat, copy=True)
|
54 |
+
|
55 |
+
n_nodes = mat.shape[0]
|
56 |
+
explicit_laplacian = _explicit_laplacian(mat, normed=normed)
|
57 |
+
laplacian = csgraph.laplacian(mat, normed=normed, copy=copy)
|
58 |
+
sp_laplacian = csgraph.laplacian(sp_mat, normed=normed,
|
59 |
+
copy=copy)
|
60 |
+
|
61 |
+
if copy:
|
62 |
+
assert_allclose(mat, mat_copy)
|
63 |
+
_assert_allclose_sparse(sp_mat, sp_mat_copy)
|
64 |
+
else:
|
65 |
+
if not (normed and check_int_type(mat)):
|
66 |
+
assert_allclose(laplacian, mat)
|
67 |
+
if sp_mat.format == 'coo':
|
68 |
+
_assert_allclose_sparse(sp_laplacian, sp_mat)
|
69 |
+
|
70 |
+
assert_allclose(laplacian, sp_laplacian.toarray())
|
71 |
+
|
72 |
+
for tested in [laplacian, sp_laplacian.toarray()]:
|
73 |
+
if not normed:
|
74 |
+
assert_allclose(tested.sum(axis=0), np.zeros(n_nodes))
|
75 |
+
assert_allclose(tested.T, tested)
|
76 |
+
assert_allclose(tested, explicit_laplacian)
|
77 |
+
|
78 |
+
|
79 |
+
def test_symmetric_graph_laplacian():
    """Exercise the symmetric-Laplacian checks over several matrix kinds."""
    symmetric_mats = (
        'np.arange(10) * np.arange(10)[:, np.newaxis]',
        'np.ones((7, 7))',
        'np.eye(19)',
        'sparse.diags([1, 1], [-1, 1], shape=(4, 4))',
        'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).toarray()',
        'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).todense()',
        'np.vander(np.arange(4)) + np.vander(np.arange(4)).T'
    )
    # Every matrix spec is checked under all normed/copy combinations.
    for spec in symmetric_mats:
        for normed in (True, False):
            for copy in (True, False):
                _check_symmetric_graph_laplacian(spec, normed, copy)
|
93 |
+
|
94 |
+
|
95 |
+
def _assert_allclose_sparse(a, b, **kwargs):
|
96 |
+
# helper function that can deal with sparse matrices
|
97 |
+
if sparse.issparse(a):
|
98 |
+
a = a.toarray()
|
99 |
+
if sparse.issparse(b):
|
100 |
+
b = b.toarray()
|
101 |
+
assert_allclose(a, b, **kwargs)
|
102 |
+
|
103 |
+
|
104 |
+
def _check_laplacian_dtype_none(
    A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type
):
    """Check ``laplacian(..., dtype=None)``: the result dtype follows the
    input dtype, except that normalizing an integer graph promotes the
    output to float64."""
    mat = arr_type(A, dtype=dtype)
    L, d = csgraph.laplacian(
        mat,
        normed=normed,
        return_diag=True,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=None,
    )
    if normed and check_int_type(mat):
        # Normalization of an integer adjacency cannot stay integral.
        assert L.dtype == np.float64
        assert d.dtype == np.float64
        _assert_allclose_sparse(L, desired_L, atol=1e-12)
        _assert_allclose_sparse(d, desired_d, atol=1e-12)
    else:
        assert L.dtype == dtype
        assert d.dtype == dtype
        desired_L = np.asarray(desired_L).astype(dtype)
        desired_d = np.asarray(desired_d).astype(dtype)
        _assert_allclose_sparse(L, desired_L, atol=1e-12)
        _assert_allclose_sparse(d, desired_d, atol=1e-12)

    if not copy:
        # With copy=False the result may share storage with the input
        # (not possible when integer input had to be promoted).
        if not (normed and check_int_type(mat)):
            if type(mat) is np.ndarray:
                assert_allclose(L, mat)
            elif mat.format == "coo":
                _assert_allclose_sparse(L, mat)
|
135 |
+
|
136 |
+
|
137 |
+
def _check_laplacian_dtype(
    A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type
):
    """Check ``laplacian`` with an explicit ``dtype``: outputs must honor it."""
    mat = arr_type(A, dtype=dtype)
    L, d = csgraph.laplacian(
        mat,
        normed=normed,
        return_diag=True,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=dtype,
    )
    assert L.dtype == dtype
    assert d.dtype == dtype
    desired_L = np.asarray(desired_L).astype(dtype)
    desired_d = np.asarray(desired_d).astype(dtype)
    _assert_allclose_sparse(L, desired_L, atol=1e-12)
    _assert_allclose_sparse(d, desired_d, atol=1e-12)

    if not copy:
        # With copy=False the result may share storage with the input
        # (not possible when integer input had to be promoted).
        if not (normed and check_int_type(mat)):
            if type(mat) is np.ndarray:
                assert_allclose(L, mat)
            elif mat.format == 'coo':
                _assert_allclose_sparse(L, mat)
|
162 |
+
|
163 |
+
|
164 |
+
# Dtype families exercised by the parametrized Laplacian tests.
INT_DTYPES = {np.intc, np_long, np.longlong}
REAL_DTYPES = {np.float32, np.float64, np.longdouble}
COMPLEX_DTYPES = {np.complex64, np.complex128, np.clongdouble}
# use sorted list to ensure fixed order of tests
# (the three sets are disjoint, so symmetric difference is simply their union)
DTYPES = sorted(INT_DTYPES ^ REAL_DTYPES ^ COMPLEX_DTYPES, key=str)
|
169 |
+
|
170 |
+
|
171 |
+
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("arr_type", [np.array,
                                      sparse.csr_matrix,
                                      sparse.coo_matrix,
                                      sparse.csr_array,
                                      sparse.coo_array])
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("normed", [True, False])
@pytest.mark.parametrize("use_out_degree", [True, False])
def test_asymmetric_laplacian(use_out_degree, normed,
                              copy, dtype, arr_type):
    """Check all four normed/degree variants of the Laplacian of a small
    directed graph against hand-computed expected values, for every array
    container and dtype."""
    # adjacency matrix
    A = [[0, 1, 0],
         [4, 2, 0],
         [0, 0, 0]]
    A = arr_type(np.array(A), dtype=dtype)
    A_copy = A.copy()

    # Exactly one of the four branches below selects the expected (L, d).
    if not normed and use_out_degree:
        # Laplacian matrix using out-degree
        L = [[1, -1, 0],
             [-4, 4, 0],
             [0, 0, 0]]
        d = [1, 4, 0]

    if normed and use_out_degree:
        # normalized Laplacian matrix using out-degree
        L = [[1, -0.5, 0],
             [-2, 1, 0],
             [0, 0, 0]]
        d = [1, 2, 1]

    if not normed and not use_out_degree:
        # Laplacian matrix using in-degree
        L = [[4, -1, 0],
             [-4, 1, 0],
             [0, 0, 0]]
        d = [4, 1, 0]

    if normed and not use_out_degree:
        # normalized Laplacian matrix using in-degree
        L = [[1, -0.5, 0],
             [-2, 1, 0],
             [0, 0, 0]]
        d = [2, 1, 1]

    # Same expectations, checked once with dtype=None and once explicit.
    _check_laplacian_dtype_none(
        A,
        L,
        d,
        normed=normed,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=dtype,
        arr_type=arr_type,
    )

    _check_laplacian_dtype(
        A_copy,
        L,
        d,
        normed=normed,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=dtype,
        arr_type=arr_type,
    )
|
238 |
+
|
239 |
+
|
240 |
+
@pytest.mark.parametrize("fmt", ['csr', 'csc', 'coo', 'lil',
                                 'dok', 'dia', 'bsr'])
@pytest.mark.parametrize("normed", [True, False])
@pytest.mark.parametrize("copy", [True, False])
def test_sparse_formats(fmt, normed, copy):
    """Every sparse storage format must be accepted by csgraph.laplacian."""
    graph = sparse.diags([1, 1], [-1, 1], shape=(4, 4), format=fmt)
    _check_symmetric_graph_laplacian(graph, normed, copy)
|
247 |
+
|
248 |
+
|
249 |
+
@pytest.mark.parametrize(
    "arr_type", [np.asarray,
                 sparse.csr_matrix,
                 sparse.coo_matrix,
                 sparse.csr_array,
                 sparse.coo_array]
)
@pytest.mark.parametrize("form", ["array", "function", "lo"])
def test_laplacian_symmetrized(arr_type, form):
    """``symmetrized=True`` must match the Laplacian of the explicitly
    symmetrized graph (mat + mat.T), in every output ``form``."""
    # adjacency matrix
    n = 3
    mat = arr_type(np.arange(n * n).reshape(n, n))
    L_in, d_in = csgraph.laplacian(
        mat,
        return_diag=True,
        form=form,
    )
    L_out, d_out = csgraph.laplacian(
        mat,
        return_diag=True,
        use_out_degree=True,
        form=form,
    )
    Ls, ds = csgraph.laplacian(
        mat,
        return_diag=True,
        symmetrized=True,
        form=form,
    )
    Ls_normed, ds_normed = csgraph.laplacian(
        mat,
        return_diag=True,
        symmetrized=True,
        normed=True,
        form=form,
    )
    # Symmetrize explicitly and recompute for comparison.
    mat += mat.T
    Lss, dss = csgraph.laplacian(mat, return_diag=True, form=form)
    Lss_normed, dss_normed = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=True,
        form=form,
    )

    # The symmetrized degree is the sum of in- and out-degrees.
    assert_allclose(ds, d_in + d_out)
    assert_allclose(ds, dss)
    assert_allclose(ds_normed, dss_normed)

    d = {}
    for L in ["L_in", "L_out", "Ls", "Ls_normed", "Lss", "Lss_normed"]:
        if form == "array":
            d[L] = eval(L)
        else:
            # "function"/"lo" forms: materialize by applying to the identity.
            d[L] = eval(L)(np.eye(n, dtype=mat.dtype))

    _assert_allclose_sparse(d["Ls"], d["L_in"] + d["L_out"].T)
    _assert_allclose_sparse(d["Ls"], d["Lss"])
    _assert_allclose_sparse(d["Ls_normed"], d["Lss_normed"])
|
308 |
+
|
309 |
+
|
310 |
+
@pytest.mark.parametrize(
    "arr_type", [np.asarray,
                 sparse.csr_matrix,
                 sparse.coo_matrix,
                 sparse.csr_array,
                 sparse.coo_array]
)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("normed", [True, False])
@pytest.mark.parametrize("symmetrized", [True, False])
@pytest.mark.parametrize("use_out_degree", [True, False])
@pytest.mark.parametrize("form", ["function", "lo"])
def test_format(dtype, arr_type, normed, symmetrized, use_out_degree, form):
    """The callable/LinearOperator output forms must agree with
    ``form="array"`` for every option combination."""
    n = 3
    mat = [[0, 1, 0], [4, 2, 0], [0, 0, 0]]
    mat = arr_type(np.array(mat), dtype=dtype)
    # Default form (no ``form`` argument) as the baseline.
    Lo, do = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=normed,
        symmetrized=symmetrized,
        use_out_degree=use_out_degree,
        dtype=dtype,
    )
    La, da = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=normed,
        symmetrized=symmetrized,
        use_out_degree=use_out_degree,
        dtype=dtype,
        form="array",
    )
    assert_allclose(do, da)
    _assert_allclose_sparse(Lo, La)

    L, d = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=normed,
        symmetrized=symmetrized,
        use_out_degree=use_out_degree,
        dtype=dtype,
        form=form,
    )
    assert_allclose(d, do)
    assert d.dtype == dtype
    # Materialize the operator by applying it to the identity matrix.
    Lm = L(np.eye(n, dtype=mat.dtype)).astype(dtype)
    _assert_allclose_sparse(Lm, Lo, rtol=2e-7, atol=2e-7)
    x = np.arange(6).reshape(3, 2)
    if not (normed and dtype in INT_DTYPES):
        assert_allclose(L(x), Lo @ x)
    else:
        # Normalized Lo is casted to integer, but L() is not
        pass
|
365 |
+
|
366 |
+
|
367 |
+
def test_format_error_message():
    """An unknown ``form`` value must raise a descriptive ValueError."""
    with pytest.raises(ValueError, match="Invalid form: 'toto'"):
        csgraph.laplacian(np.eye(1), form='toto')
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_matching.py
ADDED
@@ -0,0 +1,294 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from itertools import product
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
from numpy.testing import assert_array_equal, assert_equal
|
5 |
+
import pytest
|
6 |
+
|
7 |
+
from scipy.sparse import csr_matrix, coo_matrix, diags
|
8 |
+
from scipy.sparse.csgraph import (
|
9 |
+
maximum_bipartite_matching, min_weight_full_bipartite_matching
|
10 |
+
)
|
11 |
+
|
12 |
+
|
13 |
+
def test_maximum_bipartite_matching_raises_on_dense_input():
    """A dense (non-sparse) adjacency matrix is rejected with TypeError."""
    # Build the input outside the raises-block so the assertion can only be
    # satisfied by the call under test, not by an error during construction.
    graph = np.array([[0, 1], [0, 0]])
    with pytest.raises(TypeError):
        maximum_bipartite_matching(graph)
|
17 |
+
|
18 |
+
|
19 |
+
def test_maximum_bipartite_matching_empty_graph():
    """A 0x0 graph yields an empty matching for both permutation types."""
    graph = csr_matrix((0, 0))
    expected_matching = np.array([])
    for perm_type in ('row', 'column'):
        result = maximum_bipartite_matching(graph, perm_type=perm_type)
        assert_array_equal(expected_matching, result)
|
26 |
+
|
27 |
+
|
28 |
+
def test_maximum_bipartite_matching_empty_left_partition():
    """No columns: the row matching is empty, the column one is all -1."""
    graph = csr_matrix((2, 0))
    assert_array_equal(
        np.array([]),
        maximum_bipartite_matching(graph, perm_type='row'))
    assert_array_equal(
        np.array([-1, -1]),
        maximum_bipartite_matching(graph, perm_type='column'))
|
34 |
+
|
35 |
+
|
36 |
+
def test_maximum_bipartite_matching_empty_right_partition():
    """No rows: the row matching is all -1, the column one is empty."""
    graph = csr_matrix((0, 3))
    assert_array_equal(
        np.array([-1, -1, -1]),
        maximum_bipartite_matching(graph, perm_type='row'))
    assert_array_equal(
        np.array([]),
        maximum_bipartite_matching(graph, perm_type='column'))
|
42 |
+
|
43 |
+
|
44 |
+
def test_maximum_bipartite_matching_graph_with_no_edges():
    """Without edges every vertex stays unmatched (-1)."""
    graph = csr_matrix((2, 2))
    unmatched = np.array([-1, -1])
    for perm_type in ('row', 'column'):
        result = maximum_bipartite_matching(graph, perm_type=perm_type)
        assert_array_equal(unmatched, result)
|
50 |
+
|
51 |
+
|
52 |
+
def test_maximum_bipartite_matching_graph_that_causes_augmentation():
    # In this graph, column 1 is initially assigned to row 1, but it should be
    # reassigned to make room for row 2.
    graph = csr_matrix([[1, 1], [1, 0]])
    expected_matching = np.array([1, 0])
    for perm_type in ('column', 'row'):
        result = maximum_bipartite_matching(graph, perm_type=perm_type)
        assert_array_equal(expected_matching, result)
|
61 |
+
|
62 |
+
|
63 |
+
def test_maximum_bipartite_matching_graph_with_more_rows_than_columns():
    """Tall graph: at least one row must remain unmatched."""
    graph = csr_matrix([[1, 1], [1, 0], [0, 1]])
    by_column = maximum_bipartite_matching(graph, perm_type='column')
    by_row = maximum_bipartite_matching(graph, perm_type='row')
    assert_array_equal(np.array([0, -1, 1]), by_column)
    assert_array_equal(np.array([0, 2]), by_row)
|
69 |
+
|
70 |
+
|
71 |
+
def test_maximum_bipartite_matching_graph_with_more_columns_than_rows():
    """Wide graph: at least one column must remain unmatched."""
    graph = csr_matrix([[1, 1, 0], [0, 0, 1]])
    by_column = maximum_bipartite_matching(graph, perm_type='column')
    by_row = maximum_bipartite_matching(graph, perm_type='row')
    assert_array_equal(np.array([0, 2]), by_column)
    assert_array_equal(np.array([0, -1, 1]), by_row)
|
77 |
+
|
78 |
+
|
79 |
+
def test_maximum_bipartite_matching_explicit_zeros_count_as_edges():
    """Explicitly stored zeros are structural nonzeros, i.e. valid edges."""
    graph = csr_matrix(([0, 0], [1, 0], [0, 1, 2]), shape=(2, 2))
    expected_matching = np.array([1, 0])
    for perm_type in ('row', 'column'):
        result = maximum_bipartite_matching(graph, perm_type=perm_type)
        assert_array_equal(expected_matching, result)
|
89 |
+
|
90 |
+
|
91 |
+
def test_maximum_bipartite_matching_feasibility_of_result():
    # This is a regression test for GitHub issue #11458
    data = np.ones(50, dtype=int)
    indices = [11, 12, 19, 22, 23, 5, 22, 3, 8, 10, 5, 6, 11, 12, 13, 5, 13,
               14, 20, 22, 3, 15, 3, 13, 14, 11, 12, 19, 22, 23, 5, 22, 3, 8,
               10, 5, 6, 11, 12, 13, 5, 13, 14, 20, 22, 3, 15, 3, 13, 14]
    indptr = [0, 5, 7, 10, 10, 15, 20, 22, 22, 23, 25, 30, 32, 35, 35, 40, 45,
              47, 47, 48, 50]
    graph = csr_matrix((data, indices, indptr), shape=(20, 25))
    x = maximum_bipartite_matching(graph, perm_type='row')
    y = maximum_bipartite_matching(graph, perm_type='column')
    # Both orientations must find a matching of the same (maximum) size.
    assert (x != -1).sum() == 13
    assert (y != -1).sum() == 13
    # Ensure that each element of the matching is in fact an edge in the graph.
    for u, v in zip(range(graph.shape[0]), y):
        if v != -1:
            assert graph[u, v]
    for u, v in zip(x, range(graph.shape[1])):
        if u != -1:
            assert graph[u, v]
|
111 |
+
|
112 |
+
|
113 |
+
def test_matching_large_random_graph_with_one_edge_incident_to_each_vertex():
    """Randomly row/column-permuted identity: the matching must recover the
    permutations, so re-permuting by the result restores a full diagonal."""
    np.random.seed(42)
    A = diags(np.ones(25), offsets=0, format='csr')
    rand_perm = np.random.permutation(25)
    rand_perm2 = np.random.permutation(25)

    Rrow = np.arange(25)
    Rcol = rand_perm
    Rdata = np.ones(25, dtype=int)
    Rmat = coo_matrix((Rdata, (Rrow, Rcol))).tocsr()

    Crow = rand_perm2
    Ccol = np.arange(25)
    Cdata = np.ones(25, dtype=int)
    Cmat = coo_matrix((Cdata, (Crow, Ccol))).tocsr()
    # Randomly permute identity matrix
    B = Rmat * A * Cmat

    # Row permute
    perm = maximum_bipartite_matching(B, perm_type='row')
    Rrow = np.arange(25)
    Rcol = perm
    Rdata = np.ones(25, dtype=int)
    Rmat = coo_matrix((Rdata, (Rrow, Rcol))).tocsr()
    C1 = Rmat * B

    # Column permute
    perm2 = maximum_bipartite_matching(B, perm_type='column')
    Crow = perm2
    Ccol = np.arange(25)
    Cdata = np.ones(25, dtype=int)
    Cmat = coo_matrix((Cdata, (Crow, Ccol))).tocsr()
    C2 = B * Cmat

    # Should get identity matrix back
    assert_equal(any(C1.diagonal() == 0), False)
    assert_equal(any(C2.diagonal() == 0), False)
|
150 |
+
|
151 |
+
|
152 |
+
@pytest.mark.parametrize('num_rows,num_cols', [(0, 0), (2, 0), (0, 3)])
def test_min_weight_full_matching_trivial_graph(num_rows, num_cols):
    """Degenerate shapes yield an empty matching without raising."""
    biadjacency_matrix = csr_matrix((num_cols, num_rows))
    matching = min_weight_full_bipartite_matching(biadjacency_matrix)
    for index_array in matching:
        assert len(index_array) == 0
|
158 |
+
|
159 |
+
|
160 |
+
@pytest.mark.parametrize('biadjacency_matrix',
                         [
                             [[1, 1, 1], [1, 0, 0], [1, 0, 0]],
                             [[1, 1, 1], [0, 0, 1], [0, 0, 1]],
                             [[1, 0, 0, 1], [1, 1, 0, 1], [0, 0, 0, 0]],
                             [[1, 0, 0], [2, 0, 0]],
                             [[0, 1, 0], [0, 2, 0]],
                             [[1, 0], [2, 0], [5, 0]]
                         ])
def test_min_weight_full_matching_infeasible_problems(biadjacency_matrix):
    """Graphs that admit no full matching must raise ValueError."""
    with pytest.raises(ValueError):
        min_weight_full_bipartite_matching(csr_matrix(biadjacency_matrix))
|
172 |
+
|
173 |
+
|
174 |
+
def test_min_weight_full_matching_large_infeasible():
    # Regression test for GitHub issue #17269
    # A large, nearly-feasible cost matrix must still be detected as
    # infeasible rather than returning a bogus matching.
    a = np.asarray([
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.001, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.001, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.001, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.001],
        [0.0, 0.11687445, 0.0, 0.0, 0.01319788, 0.07509257, 0.0,
         0.0, 0.0, 0.74228317, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.81087935, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.8408466, 0.0, 0.0, 0.0, 0.0, 0.01194389,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.82994211, 0.0, 0.0, 0.0, 0.11468516, 0.0, 0.0, 0.0,
         0.11173505, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0],
        [0.18796507, 0.0, 0.04002318, 0.0, 0.0, 0.0, 0.0, 0.0, 0.75883335,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.71545464, 0.0, 0.0, 0.0, 0.0, 0.0, 0.02748488,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.78470564, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.14829198,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.10870609, 0.0, 0.0, 0.0, 0.8918677, 0.0, 0.0, 0.0, 0.06306644,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.63844085, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.7442354, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.09850549, 0.0, 0.0, 0.18638258,
         0.2769244, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.73182464, 0.0, 0.0, 0.46443561,
         0.38589284, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.29510278, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.09666032, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    ])
    with pytest.raises(ValueError, match='no full matching exists'):
        min_weight_full_bipartite_matching(csr_matrix(a))
|
226 |
+
|
227 |
+
|
228 |
+
def test_explicit_zero_causes_warning():
    """Explicitly stored zero weights trigger a UserWarning."""
    # Construct the matrix outside the warns-block so the warning assertion
    # can only be satisfied by the matching call itself.
    biadjacency_matrix = csr_matrix(((2, 0, 3), (0, 1, 1), (0, 2, 3)))
    with pytest.warns(UserWarning):
        min_weight_full_bipartite_matching(biadjacency_matrix)
|
232 |
+
|
233 |
+
|
234 |
+
# General test for linear sum assignment solvers to make it possible to rely
# on the same tests for scipy.optimize.linear_sum_assignment.
def linear_sum_assignment_assertions(
    solver, array_type, sign, test_case
):
    """Run one LSA test case through ``solver`` and through its transpose.

    ``sign == -1`` converts the minimization case into the equivalent
    maximization one.  ``test_case`` is ``(cost_matrix, expected selected
    costs, row-ordered)``.
    """
    cost_matrix, expected_cost = test_case
    maximize = sign == -1
    cost_matrix = sign * array_type(cost_matrix)
    expected_cost = sign * np.array(expected_cost)

    row_ind, col_ind = solver(cost_matrix, maximize=maximize)
    # Row indices are returned sorted; selected costs match exactly.
    assert_array_equal(row_ind, np.sort(row_ind))
    assert_array_equal(expected_cost,
                       np.array(cost_matrix[row_ind, col_ind]).flatten())

    # The transposed problem must select the same multiset of costs.
    cost_matrix = cost_matrix.T
    row_ind, col_ind = solver(cost_matrix, maximize=maximize)
    assert_array_equal(row_ind, np.sort(row_ind))
    assert_array_equal(np.sort(expected_cost),
                       np.sort(np.array(
                           cost_matrix[row_ind, col_ind])).flatten())
|
255 |
+
|
256 |
+
|
257 |
+
# (sign, (cost_matrix, expected selected costs)) cases shared with the
# dense scipy.optimize.linear_sum_assignment tests.
linear_sum_assignment_test_cases = product(
    [-1, 1],
    [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         [150, 400, 300]),

        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         [150, 2, 300]),

        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         [10, 1, 7]),

        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         [10, 1, 4]),

        # Square, with infeasible (infinite-cost) entries
        ([[10, float("inf"), float("inf")],
          [float("inf"), float("inf"), 1],
          [float("inf"), 7, float("inf")]],
         [10, 1, 7])
    ])
|
289 |
+
|
290 |
+
|
291 |
+
@pytest.mark.parametrize('sign,test_case', linear_sum_assignment_test_cases)
def test_min_weight_full_matching_small_inputs(sign, test_case):
    """Shared LSA cases, solved via the sparse full bipartite matcher."""
    linear_sum_assignment_assertions(
        min_weight_full_bipartite_matching, csr_matrix, sign, test_case)
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_pydata_sparse.py
ADDED
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
import scipy.sparse as sp
|
5 |
+
import scipy.sparse.csgraph as spgraph
|
6 |
+
|
7 |
+
from numpy.testing import assert_equal
|
8 |
+
|
9 |
+
try:
    import sparse
except Exception:
    # Any import failure (package missing or broken) disables this module.
    sparse = None

# Skip every test here when pydata/sparse is unavailable.
pytestmark = pytest.mark.skipif(sparse is None,
                                reason="pydata/sparse not installed")


msg = "pydata/sparse (0.15.1) does not implement necessary operations"


# DOK lacks operations csgraph needs, hence the expected failure marker.
sparse_params = (pytest.param("COO"),
                 pytest.param("DOK", marks=[pytest.mark.xfail(reason=msg)]))
|
23 |
+
|
24 |
+
|
25 |
+
@pytest.fixture(params=sparse_params)
def sparse_cls(request):
    """The pydata/sparse container class under test (COO or DOK)."""
    return getattr(sparse, request.param)
|
28 |
+
|
29 |
+
|
30 |
+
@pytest.fixture
def graphs(sparse_cls):
    """A small directed graph as a (dense ndarray, pydata/sparse) pair."""
    graph = [
        [0, 1, 1, 0, 0],
        [0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 1],
        [0, 0, 0, 0, 0],
    ]
    A_dense = np.array(graph)
    A_sparse = sparse_cls(A_dense)
    return A_dense, A_sparse
|
42 |
+
|
43 |
+
|
44 |
+
@pytest.mark.parametrize(
    "func",
    [
        spgraph.shortest_path,
        spgraph.dijkstra,
        spgraph.floyd_warshall,
        spgraph.bellman_ford,
        spgraph.johnson,
        spgraph.reverse_cuthill_mckee,
        spgraph.maximum_bipartite_matching,
        spgraph.structural_rank,
    ]
)
def test_csgraph_equiv(func, graphs):
    """Each routine must give the same result for a pydata/sparse input as
    for the equivalent scipy.sparse CSC input."""
    A_dense, A_sparse = graphs
    actual = func(A_sparse)
    desired = func(sp.csc_matrix(A_dense))
    assert_equal(actual, desired)
|
62 |
+
|
63 |
+
|
64 |
+
def test_connected_components(graphs):
    """connected_components must agree between pydata/sparse and scipy.sparse."""
    A_dense, A_sparse = graphs
    func = spgraph.connected_components

    actual_comp, actual_labels = func(A_sparse)
    # (Fixed a stray trailing comma in this unpacking.)
    desired_comp, desired_labels = func(sp.csc_matrix(A_dense))

    assert actual_comp == desired_comp
    assert_equal(actual_labels, desired_labels)
|
73 |
+
|
74 |
+
|
75 |
+
def test_laplacian(graphs):
    """laplacian must preserve the pydata/sparse container type and values."""
    A_dense, A_sparse = graphs
    sparse_cls = type(A_sparse)
    func = spgraph.laplacian

    actual = func(A_sparse)
    desired = func(sp.csc_matrix(A_dense))

    # The output stays in the input's container type.
    assert isinstance(actual, sparse_cls)

    assert_equal(actual.todense(), desired.todense())
|
86 |
+
|
87 |
+
|
88 |
+
@pytest.mark.parametrize(
    "func", [spgraph.breadth_first_order, spgraph.depth_first_order]
)
def test_order_search(graphs, func):
    """BFS/DFS orderings from node 0 must match between container types."""
    A_dense, A_sparse = graphs

    actual = func(A_sparse, 0)
    desired = func(sp.csc_matrix(A_dense), 0)

    assert_equal(actual, desired)
|
98 |
+
|
99 |
+
|
100 |
+
@pytest.mark.parametrize(
    "func", [spgraph.breadth_first_tree, spgraph.depth_first_tree]
)
def test_tree_search(graphs, func):
    """BFS/DFS trees must match and keep the input's container type."""
    A_dense, A_sparse = graphs
    sparse_cls = type(A_sparse)

    actual = func(A_sparse, 0)
    desired = func(sp.csc_matrix(A_dense), 0)

    assert isinstance(actual, sparse_cls)

    assert_equal(actual.todense(), desired.todense())
|
113 |
+
|
114 |
+
|
115 |
+
def test_minimum_spanning_tree(graphs):
    """MST must match and keep the input's container type."""
    A_dense, A_sparse = graphs
    sparse_cls = type(A_sparse)
    func = spgraph.minimum_spanning_tree

    actual = func(A_sparse)
    desired = func(sp.csc_matrix(A_dense))

    assert isinstance(actual, sparse_cls)

    assert_equal(actual.todense(), desired.todense())
|
126 |
+
|
127 |
+
|
128 |
+
def test_maximum_flow(graphs):
    """maximum_flow(0 -> 2) must agree in value and flow matrix."""
    A_dense, A_sparse = graphs
    sparse_cls = type(A_sparse)
    func = spgraph.maximum_flow

    actual = func(A_sparse, 0, 2)
    desired = func(sp.csr_matrix(A_dense), 0, 2)

    assert actual.flow_value == desired.flow_value
    # The flow matrix stays in the input's container type.
    assert isinstance(actual.flow, sparse_cls)

    assert_equal(actual.flow.todense(), desired.flow.todense())
|
140 |
+
|
141 |
+
|
142 |
+
def test_min_weight_full_bipartite_matching(graphs):
    """Matching on a 2x2 feasible submatrix must agree across containers."""
    A_dense, A_sparse = graphs
    func = spgraph.min_weight_full_bipartite_matching

    # Slice a square, fully matchable submatrix of the fixture graph.
    actual = func(A_sparse[0:2, 1:3])
    desired = func(sp.csc_matrix(A_dense)[0:2, 1:3])

    assert_equal(actual, desired)
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_reordering.py
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from numpy.testing import assert_equal
|
3 |
+
from scipy.sparse.csgraph import reverse_cuthill_mckee, structural_rank
|
4 |
+
from scipy.sparse import csc_matrix, csr_matrix, coo_matrix
|
5 |
+
|
6 |
+
|
7 |
+
def test_graph_reverse_cuthill_mckee():
    """RCM permutation of a fixed symmetric graph, with both 32-bit and
    64-bit index arrays."""
    A = np.array([[1, 0, 0, 0, 1, 0, 0, 0],
                  [0, 1, 1, 0, 0, 1, 0, 1],
                  [0, 1, 1, 0, 1, 0, 0, 0],
                  [0, 0, 0, 1, 0, 0, 1, 0],
                  [1, 0, 1, 0, 1, 0, 0, 0],
                  [0, 1, 0, 0, 0, 1, 0, 1],
                  [0, 0, 0, 1, 0, 0, 1, 0],
                  [0, 1, 0, 0, 0, 1, 0, 1]], dtype=int)

    graph = csr_matrix(A)
    correct_perm = np.array([6, 3, 7, 5, 1, 2, 4, 0])
    assert_equal(reverse_cuthill_mckee(graph), correct_perm)

    # Test int64 indices input
    graph.indices = graph.indices.astype('int64')
    graph.indptr = graph.indptr.astype('int64')
    assert_equal(reverse_cuthill_mckee(graph, True), correct_perm)
|
27 |
+
|
28 |
+
|
29 |
+
def test_graph_reverse_cuthill_mckee_ordering():
    """RCM on a larger symmetric graph given in COO coordinate form."""
    data = np.ones(63,dtype=int)
    rows = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2,
                     2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
                     6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9,
                     9, 10, 10, 10, 10, 10, 11, 11, 11, 11,
                     12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
                     14, 15, 15, 15, 15, 15])
    cols = np.array([0, 2, 5, 8, 10, 1, 3, 9, 11, 0, 2,
                     7, 10, 1, 3, 11, 4, 6, 12, 14, 0, 7, 13,
                     15, 4, 6, 14, 2, 5, 7, 15, 0, 8, 10, 13,
                     1, 9, 11, 0, 2, 8, 10, 15, 1, 3, 9, 11,
                     4, 12, 14, 5, 8, 13, 15, 4, 6, 12, 14,
                     5, 7, 10, 13, 15])
    graph = coo_matrix((data, (rows,cols))).tocsr()
    perm = reverse_cuthill_mckee(graph)
    correct_perm = np.array([12, 14, 4, 6, 10, 8, 2, 15,
                             0, 13, 7, 5, 9, 11, 1, 3])
    assert_equal(perm, correct_perm)
|
48 |
+
|
49 |
+
|
50 |
+
def test_graph_structural_rank():
    """Structural rank of square, rank-deficient, and rectangular matrices."""
    # Square 3x3 matrix with full structural rank.
    A = csc_matrix([[1, 1, 0],
                    [1, 0, 1],
                    [0, 1, 0]])
    assert_equal(structural_rank(A), 3)

    # Square 8x8 matrix whose structural rank is only 6.
    rows = np.array([0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
                     3, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7])
    cols = np.array([0, 1, 2, 3, 4, 2, 5, 2, 6, 0, 1, 3,
                     5, 6, 7, 4, 5, 5, 6, 2, 6, 2, 4])
    B = coo_matrix((np.ones_like(rows), (rows, cols)), shape=(8, 8))
    assert_equal(structural_rank(B), 6)

    # Wide (2x4) matrix, then its tall transpose.
    C = csc_matrix([[1, 0, 2, 0],
                    [2, 0, 4, 0]])
    assert_equal(structural_rank(C), 2)
    assert_equal(structural_rank(C.T), 2)
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_shortest_path.py
ADDED
@@ -0,0 +1,395 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from io import StringIO
|
2 |
+
import warnings
|
3 |
+
import numpy as np
|
4 |
+
from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose
|
5 |
+
from pytest import raises as assert_raises
|
6 |
+
from scipy.sparse.csgraph import (shortest_path, dijkstra, johnson,
|
7 |
+
bellman_ford, construct_dist_matrix,
|
8 |
+
NegativeCycleError)
|
9 |
+
import scipy.sparse
|
10 |
+
from scipy.io import mmread
|
11 |
+
import pytest
|
12 |
+
|
13 |
+
# ---------------------------------------------------------------------------
# Shared fixture graphs and their hand-computed shortest-path answers.
# ---------------------------------------------------------------------------

# Small 5-node directed graph (0 means "no edge").
directed_G = np.array([[0, 3, 3, 0, 0],
                       [0, 0, 0, 2, 4],
                       [0, 0, 0, 0, 0],
                       [1, 0, 0, 0, 0],
                       [2, 0, 0, 2, 0]], dtype=float)

# Symmetric counterpart used for undirected tests.
undirected_G = np.array([[0, 3, 3, 1, 2],
                         [3, 0, 0, 2, 4],
                         [3, 0, 0, 0, 0],
                         [1, 2, 0, 0, 2],
                         [2, 4, 0, 2, 0]], dtype=float)

# 0/1 adjacency derived from the directed graph.
unweighted_G = (directed_G > 0).astype(float)

# All-pairs distances for directed_G (node 2 is a sink).
directed_SP = [[0, 3, 3, 5, 7],
               [3, 0, 6, 2, 4],
               [np.inf, np.inf, 0, np.inf, np.inf],
               [1, 4, 4, 0, 8],
               [2, 5, 5, 2, 0]]

# Directed sparse graph containing a zero-weight edge and two components.
directed_sparse_zero_G = scipy.sparse.csr_matrix(
    ([0, 1, 2, 3, 1],
     ([0, 1, 2, 3, 4],
      [1, 2, 0, 4, 3])),
    shape=(5, 5))

directed_sparse_zero_SP = [[0, 0, 1, np.inf, np.inf],
                           [3, 0, 1, np.inf, np.inf],
                           [2, 2, 0, np.inf, np.inf],
                           [np.inf, np.inf, np.inf, 0, 3],
                           [np.inf, np.inf, np.inf, 1, 0]]

# Undirected (symmetric) sparse graph with zero-weight edges.
undirected_sparse_zero_G = scipy.sparse.csr_matrix(
    ([0, 0, 1, 1, 2, 2, 1, 1],
     ([0, 1, 1, 2, 2, 0, 3, 4],
      [1, 0, 2, 1, 0, 2, 4, 3])),
    shape=(5, 5))

undirected_sparse_zero_SP = [[0, 0, 1, np.inf, np.inf],
                             [0, 0, 1, np.inf, np.inf],
                             [1, 1, 0, np.inf, np.inf],
                             [np.inf, np.inf, np.inf, 0, 1],
                             [np.inf, np.inf, np.inf, 1, 0]]

# Predecessor matrices; -9999 is the "no predecessor" sentinel.
directed_pred = np.array([[-9999, 0, 0, 1, 1],
                          [3, -9999, 0, 1, 1],
                          [-9999, -9999, -9999, -9999, -9999],
                          [3, 0, 0, -9999, 1],
                          [4, 0, 0, 4, -9999]], dtype=float)

undirected_SP = np.array([[0, 3, 3, 1, 2],
                          [3, 0, 6, 2, 4],
                          [3, 6, 0, 4, 5],
                          [1, 2, 4, 0, 2],
                          [2, 4, 5, 2, 0]], dtype=float)

# Distances when the Dijkstra search is truncated at total weight 2.
undirected_SP_limit_2 = np.array([[0, np.inf, np.inf, 1, 2],
                                  [np.inf, 0, np.inf, 2, np.inf],
                                  [np.inf, np.inf, 0, np.inf, np.inf],
                                  [1, 2, np.inf, 0, 2],
                                  [2, np.inf, np.inf, 2, 0]], dtype=float)

# With limit 0 only the diagonal is reachable; everything else is inf.
undirected_SP_limit_0 = np.ones((5, 5), dtype=float) - np.eye(5)
undirected_SP_limit_0[undirected_SP_limit_0 > 0] = np.inf

undirected_pred = np.array([[-9999, 0, 0, 0, 0],
                            [1, -9999, 0, 1, 1],
                            [2, 0, -9999, 0, 0],
                            [3, 3, 0, -9999, 3],
                            [4, 4, 0, 4, -9999]], dtype=float)

# Acyclic graph with negative weights (no negative cycle).
directed_negative_weighted_G = np.array([[0, 0, 0],
                                         [-1, 0, 0],
                                         [0, -1, 0]], dtype=float)

directed_negative_weighted_SP = np.array([[0, np.inf, np.inf],
                                          [-1, 0, np.inf],
                                          [-2, -1, 0]], dtype=float)

# Shortest-path method codes exercised by most tests below.
methods = ['auto', 'FW', 'D', 'BF', 'J']
|
91 |
+
|
92 |
+
|
93 |
+
def test_dijkstra_limit():
    """Dijkstra's ``limit`` truncates any path whose total weight exceeds it."""
    cases = [
        (0, undirected_SP_limit_0),    # only the source itself is reachable
        (2, undirected_SP_limit_2),    # paths of weight <= 2 survive
        (np.inf, undirected_SP),       # no truncation at all
    ]
    for limit, expected in cases:
        SP = dijkstra(undirected_G, directed=False, limit=limit)
        assert_array_almost_equal(SP, expected)
|
105 |
+
|
106 |
+
|
107 |
+
def test_directed():
    """Every method reproduces the hand-computed directed distances."""
    for method in methods:
        SP = shortest_path(directed_G, method=method, directed=True,
                           overwrite=False)
        assert_array_almost_equal(SP, directed_SP)
|
115 |
+
|
116 |
+
|
117 |
+
def test_undirected():
    """Both routes to an undirected result give the undirected distances."""
    for method in methods:
        for symmetrize_input in (True, False):
            if symmetrize_input:
                # Directed matrix, symmetrized by directed=False.
                SP = shortest_path(directed_G, method=method, directed=False,
                                   overwrite=False)
            else:
                # Already-symmetric matrix traversed as if directed.
                SP = shortest_path(undirected_G, method=method, directed=True,
                                   overwrite=False)
            assert_array_almost_equal(SP, undirected_SP)
|
131 |
+
|
132 |
+
|
133 |
+
def test_directed_sparse_zero():
    """Zero-weight edges are respected in a two-component directed graph."""
    for method in methods:
        SP = shortest_path(directed_sparse_zero_G, method=method,
                           directed=True, overwrite=False)
        assert_array_almost_equal(SP, directed_sparse_zero_SP)
|
142 |
+
|
143 |
+
|
144 |
+
def test_undirected_sparse_zero():
    """Zero-weight edges are respected when graphs are taken as undirected."""
    for method in methods:
        for symmetrize_input in (True, False):
            if symmetrize_input:
                SP = shortest_path(directed_sparse_zero_G, method=method,
                                   directed=False, overwrite=False)
            else:
                SP = shortest_path(undirected_sparse_zero_G, method=method,
                                   directed=True, overwrite=False)
            assert_array_almost_equal(SP, undirected_sparse_zero_SP)
|
158 |
+
|
159 |
+
|
160 |
+
@pytest.mark.parametrize('directed, SP_ans',
                         ((True, directed_SP),
                          (False, undirected_SP)))
@pytest.mark.parametrize('indices', ([0, 2, 4], [0, 4], [3, 4], [0, 0]))
def test_dijkstra_indices_min_only(directed, SP_ans, indices):
    """``min_only=True`` gives each node its nearest source and that distance."""
    SP_ans = np.array(SP_ans)
    indices = np.array(indices, dtype=np.int64)
    # Derive the expected nearest source and distance per node from the
    # full all-pairs answer.
    min_ind_ans = indices[np.argmin(SP_ans[indices, :], axis=0)]
    min_d_ans = np.zeros(SP_ans.shape[0], SP_ans.dtype)
    for node in range(SP_ans.shape[0]):
        min_d_ans[node] = SP_ans[min_ind_ans[node], node]
    # Unreachable nodes use the -9999 sentinel for their "source".
    min_ind_ans[np.isinf(min_d_ans)] = -9999

    SP, pred, sources = dijkstra(directed_G,
                                 directed=directed,
                                 indices=indices,
                                 min_only=True,
                                 return_predecessors=True)
    assert_array_almost_equal(SP, min_d_ans)
    assert_array_equal(min_ind_ans, sources)

    # Distances must be identical when predecessors are not requested.
    SP = dijkstra(directed_G,
                  directed=directed,
                  indices=indices,
                  min_only=True,
                  return_predecessors=False)
    assert_array_almost_equal(SP, min_d_ans)
|
186 |
+
|
187 |
+
|
188 |
+
@pytest.mark.parametrize('n', (10, 100, 1000))
def test_dijkstra_min_only_random(n):
    """Predecessor chains from min_only Dijkstra stay inside one source tree."""
    np.random.seed(1234)
    data = scipy.sparse.rand(n, n, density=0.5, format='lil',
                             random_state=42, dtype=np.float64)
    data.setdiag(np.zeros(n, dtype=np.bool_))
    # Shuffle the vertices and take ~10% of them as sources.
    v = np.arange(n)
    np.random.shuffle(v)
    indices = v[:int(n * .1)]
    ds, pred, sources = dijkstra(data,
                                 directed=True,
                                 indices=indices,
                                 min_only=True,
                                 return_predecessors=True)
    for node in range(n):
        owner = sources[node]
        parent = pred[node]
        while parent != -9999:
            # Every ancestor on the path must belong to the same source tree.
            assert sources[parent] == owner
            parent = pred[parent]
|
209 |
+
|
210 |
+
|
211 |
+
def test_dijkstra_random():
    """Regression test: this fixed CSR graph used to hang dijkstra (gh-17782)."""
    n = 10
    indices = [0, 4, 4, 5, 7, 9, 0, 6, 2, 3, 7, 9, 1, 2, 9, 2, 5, 6]
    indptr = [0, 0, 2, 5, 6, 7, 8, 12, 15, 18, 18]
    data = [0.33629, 0.40458, 0.47493, 0.42757, 0.11497, 0.91653, 0.69084,
            0.64979, 0.62555, 0.743, 0.01724, 0.99945, 0.31095, 0.15557,
            0.02439, 0.65814, 0.23478, 0.24072]
    graph = scipy.sparse.csr_matrix((data, indices, indptr), shape=(n, n))
    # The call just has to terminate; no result assertion is needed.
    dijkstra(graph, directed=True, return_predecessors=True)
|
221 |
+
|
222 |
+
|
223 |
+
def test_gh_17782_segfault():
    """Regression test: dijkstra on this MatrixMarket graph used to segfault."""
    # NOTE(review): per-line leading whitespace inside this literal is not
    # recoverable from the original source; mmread tolerates it either way.
    text = """%%MatrixMarket matrix coordinate real general
84 84 22
2 1 4.699999809265137e+00
6 14 1.199999973177910e-01
9 6 1.199999973177910e-01
10 16 2.012000083923340e+01
11 10 1.422000026702881e+01
12 1 9.645999908447266e+01
13 18 2.012000083923340e+01
14 13 4.679999828338623e+00
15 11 1.199999973177910e-01
16 12 1.199999973177910e-01
18 15 1.199999973177910e-01
32 2 2.299999952316284e+00
33 20 6.000000000000000e+00
33 32 5.000000000000000e+00
36 9 3.720000028610229e+00
36 37 3.720000028610229e+00
36 38 3.720000028610229e+00
37 44 8.159999847412109e+00
38 32 7.903999328613281e+01
43 20 2.400000000000000e+01
43 33 4.000000000000000e+00
44 43 6.028000259399414e+01
"""
    data = mmread(StringIO(text))
    # The call only has to complete without crashing.
    dijkstra(data, directed=True, return_predecessors=True)
|
251 |
+
|
252 |
+
|
253 |
+
def test_shortest_path_indices():
    """`indices` selects rows of the answer, preserving the index shape."""
    indices = np.arange(4)
    for indshape in [(4,), (4, 1), (2, 2)]:
        outshape = indshape + (5,)
        for func in (dijkstra, bellman_ford, johnson, shortest_path):
            SP = func(directed_G, directed=False,
                      indices=indices.reshape(indshape))
            assert_array_almost_equal(
                SP, undirected_SP[indices].reshape(outshape))

    # Floyd-Warshall is all-pairs only and must reject `indices`.
    assert_raises(ValueError, shortest_path, directed_G, method='FW',
                  indices=indices)
|
268 |
+
|
269 |
+
|
270 |
+
def test_predecessors():
    """Distances and predecessor matrices match the precomputed answers."""
    expected_SP = {True: directed_SP, False: undirected_SP}
    expected_pred = {True: directed_pred, False: undirected_pred}
    for method in methods:
        for directed in (True, False):
            SP, pred = shortest_path(directed_G, method, directed=directed,
                                     overwrite=False,
                                     return_predecessors=True)
            assert_array_almost_equal(SP, expected_SP[directed])
            assert_array_almost_equal(pred, expected_pred[directed])
|
286 |
+
|
287 |
+
|
288 |
+
def test_construct_shortest_path():
    """Distances rebuilt from the predecessor matrix equal the originals."""
    # NOTE(review): `method` is iterated but never forwarded to shortest_path,
    # so only the default method is actually exercised — confirm intent.
    for method in methods:
        for directed in (True, False):
            SP1, pred = shortest_path(directed_G,
                                      directed=directed,
                                      overwrite=False,
                                      return_predecessors=True)
            SP2 = construct_dist_matrix(directed_G, pred, directed=directed)
            assert_array_almost_equal(SP1, SP2)
|
300 |
+
|
301 |
+
|
302 |
+
def test_unweighted_path():
    """unweighted=True equals running weighted on the 0/1 adjacency matrix."""
    # NOTE(review): `method` is iterated but never forwarded to shortest_path,
    # so only the default method is actually exercised — confirm intent.
    for method in methods:
        for directed in (True, False):
            SP1 = shortest_path(directed_G,
                                directed=directed,
                                overwrite=False,
                                unweighted=True)
            SP2 = shortest_path(unweighted_G,
                                directed=directed,
                                overwrite=False,
                                unweighted=False)
            assert_array_almost_equal(SP1, SP2)
|
317 |
+
|
318 |
+
|
319 |
+
def test_negative_cycles():
    """Methods supporting negative weights must detect a negative cycle."""
    # All-ones graph with a zeroed diagonal; the -2 edge makes the
    # 1 -> 2 -> 1 loop sum to -1.
    graph = np.ones([5, 5])
    graph.flat[::6] = 0
    graph[1, 2] = -2
    for method in ['FW', 'J', 'BF']:
        for directed in (True, False):
            assert_raises(NegativeCycleError, shortest_path, graph, method,
                          directed)
|
332 |
+
|
333 |
+
|
334 |
+
@pytest.mark.parametrize("method", ['FW', 'J', 'BF'])
def test_negative_weights(method):
    """Negative edge weights without a cycle are handled by FW, J, and BF."""
    SP = shortest_path(directed_negative_weighted_G, method, directed=True)
    assert_allclose(SP, directed_negative_weighted_SP, atol=1e-10)
|
338 |
+
|
339 |
+
|
340 |
+
def test_masked_input():
    """Shortest paths remain correct in the presence of masked arrays."""
    # NOTE(review): the masked array is created but never passed to
    # shortest_path; only the plain ndarray is exercised — confirm intent.
    np.ma.masked_equal(directed_G, 0)
    for method in methods:
        SP = shortest_path(directed_G, method=method, directed=True,
                           overwrite=False)
        assert_array_almost_equal(SP, directed_SP)
|
350 |
+
|
351 |
+
|
352 |
+
def test_overwrite():
    """overwrite=False must leave the input matrix byte-for-byte intact."""
    G = np.array([[0, 3, 3, 1, 2],
                  [3, 0, 0, 2, 4],
                  [3, 0, 0, 0, 0],
                  [1, 2, 0, 0, 2],
                  [2, 4, 0, 2, 0]], dtype=float)
    working_copy = G.copy()
    shortest_path(working_copy, overwrite=False)
    assert_array_equal(working_copy, G)
|
361 |
+
|
362 |
+
|
363 |
+
@pytest.mark.parametrize('method', methods)
def test_buffer(method):
    """Read-only sparse buffers (e.g. from joblib workers) must not raise.

    Guards against ``ValueError: buffer source array is read-only``.
    """
    G = scipy.sparse.csr_matrix([[1.]])
    G.data.flags['WRITEABLE'] = False
    shortest_path(G, method=method)
|
373 |
+
|
374 |
+
|
375 |
+
def test_NaN_warnings():
    """NaN weights in the input must not emit RuntimeWarnings."""
    with warnings.catch_warnings(record=True) as captured:
        shortest_path(np.array([[0, 1], [np.nan, 0]]))
    for entry in captured:
        assert entry.category is not RuntimeWarning
|
380 |
+
|
381 |
+
|
382 |
+
def test_sparse_matrices():
    """lil, csr, and csc inputs all agree with the dense-array result."""
    G_dense = np.array([[0, 3, 0, 0, 0],
                        [0, 0, -1, 0, 0],
                        [0, 0, 0, 2, 0],
                        [0, 0, 0, 0, 4],
                        [0, 0, 0, 0, 0]], dtype=float)
    expected = shortest_path(G_dense)
    for to_sparse in (scipy.sparse.csr_matrix,
                      scipy.sparse.csc_matrix,
                      scipy.sparse.lil_matrix):
        assert_array_almost_equal(expected, shortest_path(to_sparse(G_dense)))
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_spanning_tree.py
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Test the minimum spanning tree function"""
|
2 |
+
import numpy as np
|
3 |
+
from numpy.testing import assert_
|
4 |
+
import numpy.testing as npt
|
5 |
+
from scipy.sparse import csr_matrix
|
6 |
+
from scipy.sparse.csgraph import minimum_spanning_tree
|
7 |
+
|
8 |
+
|
9 |
+
def test_minimum_spanning_tree():
    """Minimum spanning tree on a fixed two-component graph and random graphs."""
    # Fixed graph with two connected components.
    graph = np.asarray([[0, 1, 0, 0, 0],
                        [1, 0, 0, 0, 0],
                        [0, 0, 0, 8, 5],
                        [0, 0, 8, 0, 1],
                        [0, 0, 5, 1, 0]])
    expected = np.asarray([[0, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 5],
                           [0, 0, 0, 0, 1],
                           [0, 0, 0, 0, 0]])

    csgraph = csr_matrix(graph)
    mintree = minimum_spanning_tree(csgraph)
    npt.assert_array_equal(mintree.toarray(), expected,
                           'Incorrect spanning tree found.')
    # Without overwrite the input graph must be preserved.
    npt.assert_array_equal(csgraph.toarray(), graph,
                           'Original graph was modified.')
    # Overwriting in place must still yield the same tree.
    mintree = minimum_spanning_tree(csgraph, overwrite=True)
    npt.assert_array_equal(mintree.toarray(), expected,
                           'Graph was not properly modified to contain MST.')

    np.random.seed(1234)
    for N in (5, 10, 15, 20):
        # Random dense graph with every weight above 3.
        graph = 3 + np.random.random((N, N))
        csgraph = csr_matrix(graph)
        # A spanning forest has at most N - 1 edges.
        mintree = minimum_spanning_tree(csgraph)
        assert_(mintree.nnz < N)

        # Make the superdiagonal the cheapest edges to force a known tree.
        idx = np.arange(N - 1)
        graph[idx, idx + 1] = 1
        csgraph = csr_matrix(graph)
        mintree = minimum_spanning_tree(csgraph)

        expected = np.zeros((N, N))
        expected[idx, idx + 1] = 1
        npt.assert_array_equal(mintree.toarray(), expected,
                               'Incorrect spanning tree found.')
|
venv/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_traversal.py
ADDED
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
from numpy.testing import assert_array_almost_equal
|
4 |
+
from scipy.sparse import csr_array
|
5 |
+
from scipy.sparse.csgraph import (breadth_first_tree, depth_first_tree,
|
6 |
+
csgraph_to_dense, csgraph_from_dense)
|
7 |
+
|
8 |
+
|
9 |
+
def test_graph_breadth_first():
    """BFS tree rooted at node 0 of a 5-node weighted graph."""
    dense = np.array([[0, 1, 2, 0, 0],
                      [1, 0, 0, 0, 3],
                      [2, 0, 0, 7, 0],
                      [0, 0, 7, 0, 1],
                      [0, 3, 0, 1, 0]])
    csgraph = csgraph_from_dense(dense, null_value=0)

    expected = np.array([[0, 1, 2, 0, 0],
                         [0, 0, 0, 0, 3],
                         [0, 0, 0, 7, 0],
                         [0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0]])

    # The graph is symmetric, so both traversals give the same tree.
    for directed in (True, False):
        tree = breadth_first_tree(csgraph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(tree), expected)
|
27 |
+
|
28 |
+
|
29 |
+
def test_graph_depth_first():
    """DFS tree rooted at node 0 of the same 5-node weighted graph."""
    dense = np.array([[0, 1, 2, 0, 0],
                      [1, 0, 0, 0, 3],
                      [2, 0, 0, 7, 0],
                      [0, 0, 7, 0, 1],
                      [0, 3, 0, 1, 0]])
    csgraph = csgraph_from_dense(dense, null_value=0)

    expected = np.array([[0, 1, 0, 0, 0],
                         [0, 0, 0, 0, 3],
                         [0, 0, 0, 0, 0],
                         [0, 0, 7, 0, 0],
                         [0, 0, 0, 1, 0]])

    # The graph is symmetric, so both traversals give the same tree.
    for directed in (True, False):
        tree = depth_first_tree(csgraph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(tree), expected)
|
47 |
+
|
48 |
+
|
49 |
+
def test_graph_breadth_first_trivial_graph():
    """A single-node graph yields a single-node, edgeless BFS tree."""
    csgraph = csgraph_from_dense(np.array([[0]]), null_value=0)
    expected = np.array([[0]])
    for directed in (True, False):
        tree = breadth_first_tree(csgraph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(tree), expected)
|
59 |
+
|
60 |
+
|
61 |
+
def test_graph_depth_first_trivial_graph():
    """A single-node graph yields a single-node, edgeless DFS tree."""
    csgraph = csgraph_from_dense(np.array([[0]]), null_value=0)
    expected = np.array([[0]])
    for directed in (True, False):
        tree = depth_first_tree(csgraph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(tree), expected)
|
71 |
+
|
72 |
+
|
73 |
+
@pytest.mark.parametrize('directed', [True, False])
|
74 |
+
@pytest.mark.parametrize('tree_func', [breadth_first_tree, depth_first_tree])
|
75 |
+
def test_int64_indices(tree_func, directed):
|
76 |
+
# See https://github.com/scipy/scipy/issues/18716
|
77 |
+
g = csr_array(([1], np.array([[0], [1]], dtype=np.int64)), shape=(2, 2))
|
78 |
+
assert g.indices.dtype == np.int64
|
79 |
+
tree = tree_func(g, 0, directed=directed)
|
80 |
+
assert_array_almost_equal(csgraph_to_dense(tree), [[0, 1], [0, 0]])
|
81 |
+
|
venv/lib/python3.10/site-packages/scipy/sparse/linalg/__init__.py
ADDED
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Sparse linear algebra (:mod:`scipy.sparse.linalg`)
|
3 |
+
==================================================
|
4 |
+
|
5 |
+
.. currentmodule:: scipy.sparse.linalg
|
6 |
+
|
7 |
+
Abstract linear operators
|
8 |
+
-------------------------
|
9 |
+
|
10 |
+
.. autosummary::
|
11 |
+
:toctree: generated/
|
12 |
+
|
13 |
+
LinearOperator -- abstract representation of a linear operator
|
14 |
+
aslinearoperator -- convert an object to an abstract linear operator
|
15 |
+
|
16 |
+
Matrix Operations
|
17 |
+
-----------------
|
18 |
+
|
19 |
+
.. autosummary::
|
20 |
+
:toctree: generated/
|
21 |
+
|
22 |
+
inv -- compute the sparse matrix inverse
|
23 |
+
expm -- compute the sparse matrix exponential
|
24 |
+
expm_multiply -- compute the product of a matrix exponential and a matrix
|
25 |
+
matrix_power -- compute the matrix power by raising a matrix to an exponent
|
26 |
+
|
27 |
+
Matrix norms
|
28 |
+
------------
|
29 |
+
|
30 |
+
.. autosummary::
|
31 |
+
:toctree: generated/
|
32 |
+
|
33 |
+
norm -- Norm of a sparse matrix
|
34 |
+
onenormest -- Estimate the 1-norm of a sparse matrix
|
35 |
+
|
36 |
+
Solving linear problems
|
37 |
+
-----------------------
|
38 |
+
|
39 |
+
Direct methods for linear equation systems:
|
40 |
+
|
41 |
+
.. autosummary::
|
42 |
+
:toctree: generated/
|
43 |
+
|
44 |
+
spsolve -- Solve the sparse linear system Ax=b
|
45 |
+
spsolve_triangular -- Solve sparse linear system Ax=b for a triangular A.
|
46 |
+
factorized -- Pre-factorize matrix to a function solving a linear system
|
47 |
+
MatrixRankWarning -- Warning on exactly singular matrices
|
48 |
+
use_solver -- Select direct solver to use
|
49 |
+
|
50 |
+
Iterative methods for linear equation systems:
|
51 |
+
|
52 |
+
.. autosummary::
|
53 |
+
:toctree: generated/
|
54 |
+
|
55 |
+
bicg -- Use BIConjugate Gradient iteration to solve Ax = b
|
56 |
+
bicgstab -- Use BIConjugate Gradient STABilized iteration to solve Ax = b
|
57 |
+
cg -- Use Conjugate Gradient iteration to solve Ax = b
|
58 |
+
cgs -- Use Conjugate Gradient Squared iteration to solve Ax = b
|
59 |
+
gmres -- Use Generalized Minimal RESidual iteration to solve Ax = b
|
60 |
+
lgmres -- Solve a matrix equation using the LGMRES algorithm
|
61 |
+
minres -- Use MINimum RESidual iteration to solve Ax = b
|
62 |
+
qmr -- Use Quasi-Minimal Residual iteration to solve Ax = b
|
63 |
+
gcrotmk -- Solve a matrix equation using the GCROT(m,k) algorithm
|
64 |
+
tfqmr -- Use Transpose-Free Quasi-Minimal Residual iteration to solve Ax = b
|
65 |
+
|
66 |
+
Iterative methods for least-squares problems:
|
67 |
+
|
68 |
+
.. autosummary::
|
69 |
+
:toctree: generated/
|
70 |
+
|
71 |
+
lsqr -- Find the least-squares solution to a sparse linear equation system
|
72 |
+
lsmr -- Find the least-squares solution to a sparse linear equation system
|
73 |
+
|
74 |
+
Matrix factorizations
|
75 |
+
---------------------
|
76 |
+
|
77 |
+
Eigenvalue problems:
|
78 |
+
|
79 |
+
.. autosummary::
|
80 |
+
:toctree: generated/
|
81 |
+
|
82 |
+
eigs -- Find k eigenvalues and eigenvectors of the square matrix A
|
83 |
+
eigsh -- Find k eigenvalues and eigenvectors of a symmetric matrix
|
84 |
+
lobpcg -- Solve symmetric partial eigenproblems with optional preconditioning
|
85 |
+
|
86 |
+
Singular values problems:
|
87 |
+
|
88 |
+
.. autosummary::
|
89 |
+
:toctree: generated/
|
90 |
+
|
91 |
+
svds -- Compute k singular values/vectors for a sparse matrix
|
92 |
+
|
93 |
+
The `svds` function supports the following solvers:
|
94 |
+
|
95 |
+
.. toctree::
|
96 |
+
|
97 |
+
sparse.linalg.svds-arpack
|
98 |
+
sparse.linalg.svds-lobpcg
|
99 |
+
sparse.linalg.svds-propack
|
100 |
+
|
101 |
+
Complete or incomplete LU factorizations
|
102 |
+
|
103 |
+
.. autosummary::
|
104 |
+
:toctree: generated/
|
105 |
+
|
106 |
+
splu -- Compute a LU decomposition for a sparse matrix
|
107 |
+
spilu -- Compute an incomplete LU decomposition for a sparse matrix
|
108 |
+
SuperLU -- Object representing an LU factorization
|
109 |
+
|
110 |
+
Sparse arrays with structure
|
111 |
+
----------------------------
|
112 |
+
|
113 |
+
.. autosummary::
|
114 |
+
:toctree: generated/
|
115 |
+
|
116 |
+
LaplacianNd -- Laplacian on a uniform rectangular grid in ``N`` dimensions
|
117 |
+
|
118 |
+
Exceptions
|
119 |
+
----------
|
120 |
+
|
121 |
+
.. autosummary::
|
122 |
+
:toctree: generated/
|
123 |
+
|
124 |
+
ArpackNoConvergence
|
125 |
+
ArpackError
|
126 |
+
|
127 |
+
"""
|
128 |
+
|
129 |
+
from ._isolve import *
|
130 |
+
from ._dsolve import *
|
131 |
+
from ._interface import *
|
132 |
+
from ._eigen import *
|
133 |
+
from ._matfuncs import *
|
134 |
+
from ._onenormest import *
|
135 |
+
from ._norm import *
|
136 |
+
from ._expm_multiply import *
|
137 |
+
from ._special_sparse_arrays import *
|
138 |
+
|
139 |
+
# Deprecated namespaces, to be removed in v2.0.0
|
140 |
+
from . import isolve, dsolve, interface, eigen, matfuncs
|
141 |
+
|
142 |
+
__all__ = [s for s in dir() if not s.startswith('_')]
|
143 |
+
|
144 |
+
from scipy._lib._testutils import PytestTester
|
145 |
+
test = PytestTester(__name__)
|
146 |
+
del PytestTester
|
venv/lib/python3.10/site-packages/scipy/sparse/linalg/_expm_multiply.py
ADDED
@@ -0,0 +1,810 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Compute the action of the matrix exponential."""
|
2 |
+
from warnings import warn
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
|
6 |
+
import scipy.linalg
|
7 |
+
import scipy.sparse.linalg
|
8 |
+
from scipy.linalg._decomp_qr import qr
|
9 |
+
from scipy.sparse._sputils import is_pydata_spmatrix
|
10 |
+
from scipy.sparse.linalg import aslinearoperator
|
11 |
+
from scipy.sparse.linalg._interface import IdentityOperator
|
12 |
+
from scipy.sparse.linalg._onenormest import onenormest
|
13 |
+
|
14 |
+
__all__ = ['expm_multiply']
|
15 |
+
|
16 |
+
|
17 |
+
def _exact_inf_norm(A):
|
18 |
+
# A compatibility function which should eventually disappear.
|
19 |
+
if scipy.sparse.issparse(A):
|
20 |
+
return max(abs(A).sum(axis=1).flat)
|
21 |
+
elif is_pydata_spmatrix(A):
|
22 |
+
return max(abs(A).sum(axis=1))
|
23 |
+
else:
|
24 |
+
return np.linalg.norm(A, np.inf)
|
25 |
+
|
26 |
+
|
27 |
+
def _exact_1_norm(A):
|
28 |
+
# A compatibility function which should eventually disappear.
|
29 |
+
if scipy.sparse.issparse(A):
|
30 |
+
return max(abs(A).sum(axis=0).flat)
|
31 |
+
elif is_pydata_spmatrix(A):
|
32 |
+
return max(abs(A).sum(axis=0))
|
33 |
+
else:
|
34 |
+
return np.linalg.norm(A, 1)
|
35 |
+
|
36 |
+
|
37 |
+
def _trace(A):
|
38 |
+
# A compatibility function which should eventually disappear.
|
39 |
+
if is_pydata_spmatrix(A):
|
40 |
+
return A.to_scipy_sparse().trace()
|
41 |
+
else:
|
42 |
+
return A.trace()
|
43 |
+
|
44 |
+
|
45 |
+
def traceest(A, m3, seed=None):
    """Estimate ``np.trace(A)`` using ``3*m3`` matrix-vector products.

    The result is randomized unless a `seed` is given.

    Parameters
    ----------
    A : LinearOperator
        Square linear operator whose trace will be estimated.
    m3 : int
        Number of matrix-vector products divided by 3 used to estimate the
        trace.
    seed : optional
        Seed for `numpy.random.default_rng`.
        Can be provided to obtain deterministic results.

    Returns
    -------
    trace : LinearOperator.dtype
        Estimate of the trace.

    Notes
    -----
    This is the Hutch++ algorithm given in [1]_.

    References
    ----------
    .. [1] Meyer, Raphael A., Cameron Musco, Christopher Musco, and David P.
       Woodruff. "Hutch++: Optimal Stochastic Trace Estimation." In Symposium
       on Simplicity in Algorithms (SOSA), pp. 142-155. Society for Industrial
       and Applied Mathematics, 2021
       https://doi.org/10.1137/1.9781611976496.16

    """
    rng = np.random.default_rng(seed)
    if len(A.shape) != 2 or A.shape[-1] != A.shape[-2]:
        raise ValueError("Expected A to be like a square matrix.")
    n = A.shape[-1]
    # Low-rank part: capture the dominant range of A via a sketch A @ S.
    S = rng.choice([-1.0, +1.0], [n, m3])
    Q, _ = qr(A.matmat(S), overwrite_a=True, mode='economic')
    trace_low_rank = np.trace(Q.conj().T @ A.matmat(Q))
    # Hutchinson estimate on the residual, with the probes deflated by Q.
    G = rng.choice([-1, +1], [n, m3])
    deflated = G - Q @ (Q.conj().T @ G)
    trace_residual = np.trace(deflated.conj().T @ A.matmat(deflated))
    return trace_low_rank + trace_residual / m3
|
90 |
+
|
91 |
+
|
92 |
+
def _ident_like(A):
|
93 |
+
# A compatibility function which should eventually disappear.
|
94 |
+
if scipy.sparse.issparse(A):
|
95 |
+
# Creates a sparse matrix in dia format
|
96 |
+
out = scipy.sparse.eye(A.shape[0], A.shape[1], dtype=A.dtype)
|
97 |
+
if isinstance(A, scipy.sparse.spmatrix):
|
98 |
+
return out.asformat(A.format)
|
99 |
+
return scipy.sparse.dia_array(out).asformat(A.format)
|
100 |
+
elif is_pydata_spmatrix(A):
|
101 |
+
import sparse
|
102 |
+
return sparse.eye(A.shape[0], A.shape[1], dtype=A.dtype)
|
103 |
+
elif isinstance(A, scipy.sparse.linalg.LinearOperator):
|
104 |
+
return IdentityOperator(A.shape, dtype=A.dtype)
|
105 |
+
else:
|
106 |
+
return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)
|
107 |
+
|
108 |
+
|
109 |
+
def expm_multiply(A, B, start=None, stop=None, num=None,
                  endpoint=None, traceA=None):
    """
    Compute the action of the matrix exponential of A on B.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix or vector to be multiplied by the matrix exponential of A.
    start : scalar, optional
        The starting time point of the sequence.
    stop : scalar, optional
        The end time point of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced time points, so that `stop` is excluded.
        Note that the step size changes when `endpoint` is False.
    num : int, optional
        Number of time points to use.
    endpoint : bool, optional
        If True, `stop` is the last time point.  Otherwise, it is not included.
    traceA : scalar, optional
        Trace of `A`.  If not given, the trace is estimated for linear
        operators, or calculated exactly for sparse matrices.  It is used to
        precondition `A`, thus an approximate trace is acceptable.
        For linear operators, `traceA` should be provided to ensure
        performance as the estimation is not guaranteed to be reliable for
        all cases.

        .. versionadded:: 1.9.0

    Returns
    -------
    expm_A_B : ndarray
        The result of the action :math:`e^{t_k A} B`.

    Warns
    -----
    UserWarning
        If `A` is a linear operator and ``traceA=None`` (default).

    Notes
    -----
    The optional arguments defining the sequence of evenly spaced time points
    are compatible with the arguments of `numpy.linspace`.

    The ndim of the output could be 1, 2, or 3: 1 for the expm action on a
    single vector at a single time point; 2 for a vector at multiple time
    points or a matrix at a single time point; 3 for a matrix with multiple
    columns at multiple time points.  If multiple time points are requested,
    ``expm_A_B[0]`` will always be the action of the expm at the first time
    point, regardless of whether the action is on a vector or a matrix.

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2011)
           "Computing the Action of the Matrix Exponential,
           with an Application to Exponential Integrators."
           SIAM Journal on Scientific Computing,
           33 (2). pp. 488-511. ISSN 1064-8275
           http://eprints.ma.man.ac.uk/1591/

    .. [2] Nicholas J. Higham and Awad H. Al-Mohy (2010)
           "Computing Matrix Functions."
           Acta Numerica,
           19. 159-208. ISSN 0962-4929
           http://eprints.ma.man.ac.uk/1451/

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import expm, expm_multiply
    >>> A = csc_matrix([[1, 0], [0, 1]])
    >>> A.toarray()
    array([[1, 0],
           [0, 1]], dtype=int64)
    >>> B = np.array([np.exp(-1.), np.exp(-2.)])
    >>> B
    array([ 0.36787944,  0.13533528])
    >>> expm_multiply(A, B, start=1, stop=2, num=3, endpoint=True)
    array([[ 1.        ,  0.36787944],
           [ 1.64872127,  0.60653066],
           [ 2.71828183,  1.        ]])
    >>> expm(A).dot(B)         # Verify 1st timestep
    array([ 1.        ,  0.36787944])
    >>> expm(1.5*A).dot(B)     # Verify 2nd timestep
    array([ 1.64872127,  0.60653066])
    >>> expm(2*A).dot(B)       # Verify 3rd timestep
    array([ 2.71828183,  1.        ])
    """
    # With no time-sequence arguments, evaluate at the single default time
    # point t=1; otherwise delegate to the interval algorithm and discard
    # its debugging status code.
    interval_args = (start, stop, num, endpoint)
    if any(arg is not None for arg in interval_args):
        X, _ = _expm_multiply_interval(A, B, start, stop, num,
                                       endpoint, traceA=traceA)
    else:
        X = _expm_multiply_simple(A, B, traceA=traceA)
    return X
|
212 |
+
|
213 |
+
|
214 |
+
def _expm_multiply_simple(A, B, t=1.0, traceA=None, balance=False):
    """
    Compute the action of the matrix exponential at a single time point.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix to be multiplied by the matrix exponential of A.
    t : float
        A time point.
    traceA : scalar, optional
        Trace of `A`. If not given the trace is estimated for linear operators,
        or calculated exactly for sparse matrices. It is used to precondition
        `A`, thus an approximate trace is acceptable
    balance : bool
        Indicates whether or not to apply balancing.

    Returns
    -------
    F : ndarray
        :math:`e^{t A} B`

    Notes
    -----
    This is algorithm (3.2) in Al-Mohy and Higham (2011).

    """
    if balance:
        raise NotImplementedError
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    if A.shape[1] != B.shape[0]:
        raise ValueError('shapes of matrices A {} and B {} are incompatible'
                         .format(A.shape, B.shape))
    ident = _ident_like(A)
    is_linear_operator = isinstance(A, scipy.sparse.linalg.LinearOperator)
    n = A.shape[0]
    # n0 is the number of columns of B (1 for a vector); it enters the cost
    # model used to choose the Taylor degree in _fragment_3_1.
    if len(B.shape) == 1:
        n0 = 1
    elif len(B.shape) == 2:
        n0 = B.shape[1]
    else:
        raise ValueError('expected B to be like a matrix or a vector')
    # Double-precision unit roundoff, used as the series truncation tolerance.
    u_d = 2**-53
    tol = u_d
    if traceA is None:
        if is_linear_operator:
            warn("Trace of LinearOperator not available, it will be estimated."
                 " Provide `traceA` to ensure performance.", stacklevel=3)
        # m3=1 is a bit arbitrary choice; a more accurate trace (larger m3)
        # might speed up exponential calculation, but trace estimation is
        # more costly
        traceA = traceest(A, m3=1) if is_linear_operator else _trace(A)
    # Shift A by mu*I so the shifted operator is trace-free.  This
    # preconditioning shrinks the norm and hence the required series length;
    # the exp(t*mu) factor is restored inside the core routine.
    mu = traceA / float(n)
    A = A - mu * ident
    A_1_norm = onenormest(A) if is_linear_operator else _exact_1_norm(A)
    if t*A_1_norm == 0:
        # The shifted operator is (numerically) zero: a single trivial step.
        m_star, s = 0, 1
    else:
        ell = 2
        norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
        # Choose Taylor degree m_star and number of scaling steps s.
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
    return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance)
|
278 |
+
|
279 |
+
|
280 |
+
def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False):
    """
    Evaluate exp(t*(A + mu*I)) @ B via s rounds of a degree-m_star Taylor
    series.

    A is assumed to have already been shifted by -mu*I by the caller; the
    factor ``eta = exp(t*mu/s)`` restores that shift after each of the s
    scaling steps.
    """
    if balance:
        raise NotImplementedError
    if tol is None:
        # Default to double-precision unit roundoff.
        u_d = 2 ** -53
        tol = u_d
    F = B
    eta = np.exp(t*mu / float(s))
    for i in range(s):
        c1 = _exact_inf_norm(B)
        for j in range(m_star):
            # Next Taylor term: B_{j+1} = (t/s) * A @ B_j / (j+1).
            coeff = t / float(s*(j+1))
            B = coeff * A.dot(B)
            c2 = _exact_inf_norm(B)
            F = F + B
            # Early termination: stop once two consecutive terms are
            # negligible relative to the accumulated sum
            # (Al-Mohy and Higham, 2011).
            if c1 + c2 <= tol * _exact_inf_norm(F):
                break
            c1 = c2
        F = eta * F
        B = F
    return F
|
304 |
+
|
305 |
+
|
306 |
+
# This table helps to compute bounds.
|
307 |
+
# They seem to have been difficult to calculate, involving symbolic
|
308 |
+
# manipulation of equations, followed by numerical root finding.
|
309 |
+
# This table helps to compute bounds.
# They seem to have been difficult to calculate, involving symbolic
# manipulation of equations, followed by numerical root finding.
# Keys are Taylor-series degrees m; values are the corresponding theta_m
# thresholds used to bound the scaled operator's 1-norm.
_theta = {
    # The first 30 values are from table A.3 of Computing Matrix Functions.
    1: 2.29e-16,
    2: 2.58e-8,
    3: 1.39e-5,
    4: 3.40e-4,
    5: 2.40e-3,
    6: 9.07e-3,
    7: 2.38e-2,
    8: 5.00e-2,
    9: 8.96e-2,
    10: 1.44e-1,
    # 11
    11: 2.14e-1,
    12: 3.00e-1,
    13: 4.00e-1,
    14: 5.14e-1,
    15: 6.41e-1,
    16: 7.81e-1,
    17: 9.31e-1,
    18: 1.09,
    19: 1.26,
    20: 1.44,
    # 21
    21: 1.62,
    22: 1.82,
    23: 2.01,
    24: 2.22,
    25: 2.43,
    26: 2.64,
    27: 2.86,
    28: 3.08,
    29: 3.31,
    30: 3.54,
    # The rest are from table 3.1 of
    # Computing the Action of the Matrix Exponential.
    35: 4.7,
    40: 6.0,
    45: 7.2,
    50: 8.5,
    55: 9.9,
}
|
351 |
+
|
352 |
+
|
353 |
+
def _onenormest_matrix_power(A, p,
|
354 |
+
t=2, itmax=5, compute_v=False, compute_w=False):
|
355 |
+
"""
|
356 |
+
Efficiently estimate the 1-norm of A^p.
|
357 |
+
|
358 |
+
Parameters
|
359 |
+
----------
|
360 |
+
A : ndarray
|
361 |
+
Matrix whose 1-norm of a power is to be computed.
|
362 |
+
p : int
|
363 |
+
Non-negative integer power.
|
364 |
+
t : int, optional
|
365 |
+
A positive parameter controlling the tradeoff between
|
366 |
+
accuracy versus time and memory usage.
|
367 |
+
Larger values take longer and use more memory
|
368 |
+
but give more accurate output.
|
369 |
+
itmax : int, optional
|
370 |
+
Use at most this many iterations.
|
371 |
+
compute_v : bool, optional
|
372 |
+
Request a norm-maximizing linear operator input vector if True.
|
373 |
+
compute_w : bool, optional
|
374 |
+
Request a norm-maximizing linear operator output vector if True.
|
375 |
+
|
376 |
+
Returns
|
377 |
+
-------
|
378 |
+
est : float
|
379 |
+
An underestimate of the 1-norm of the sparse matrix.
|
380 |
+
v : ndarray, optional
|
381 |
+
The vector such that ||Av||_1 == est*||v||_1.
|
382 |
+
It can be thought of as an input to the linear operator
|
383 |
+
that gives an output with particularly large norm.
|
384 |
+
w : ndarray, optional
|
385 |
+
The vector Av which has relatively large 1-norm.
|
386 |
+
It can be thought of as an output of the linear operator
|
387 |
+
that is relatively large in norm compared to the input.
|
388 |
+
|
389 |
+
"""
|
390 |
+
#XXX Eventually turn this into an API function in the _onenormest module,
|
391 |
+
#XXX and remove its underscore,
|
392 |
+
#XXX but wait until expm_multiply goes into scipy.
|
393 |
+
from scipy.sparse.linalg._onenormest import onenormest
|
394 |
+
return onenormest(aslinearoperator(A) ** p)
|
395 |
+
|
396 |
+
class LazyOperatorNormInfo:
    """
    Lazily computed norm information about an operator.

    Caches the exact 1-norm of the operator, plus estimates of 1-norms of
    its powers, in the notation of Computing the Action (2011).
    This class is specialized enough to probably not be of general interest
    outside of this module.
    """

    def __init__(self, A, A_1_norm=None, ell=2, scale=1):
        """
        Provide the operator and some norm-related information.

        Parameters
        ----------
        A : linear operator
            The operator of interest.
        A_1_norm : float, optional
            The exact 1-norm of A, if already known.
        ell : int, optional
            A technical parameter controlling norm estimation quality.
        scale : int, optional
            If specified, return the norms of scale*A instead of A.
        """
        self._A = A
        self._A_1_norm = A_1_norm
        self._ell = ell
        self._d = {}  # cache: p -> unscaled estimate of ||A^p||^(1/p)
        self._scale = scale

    def set_scale(self, scale):
        """Set the scale parameter."""
        self._scale = scale

    def onenorm(self):
        """Return the exact (scaled) 1-norm, computing it on first use."""
        if self._A_1_norm is None:
            self._A_1_norm = _exact_1_norm(self._A)
        return self._scale * self._A_1_norm

    def d(self, p):
        """Lazily estimate d_p(A) ~= ||A^p||^(1/p), with ||.|| the 1-norm."""
        if p not in self._d:
            estimate = _onenormest_matrix_power(self._A, p, self._ell)
            self._d[p] = estimate ** (1.0 / p)
        return self._scale * self._d[p]

    def alpha(self, p):
        """Lazily compute max(d(p), d(p+1))."""
        return max(self.d(p), self.d(p + 1))
|
458 |
+
|
459 |
+
def _compute_cost_div_m(m, p, norm_info):
    """
    A helper function for computing bounds.

    This is equation (3.10): it measures cost in terms of the number of
    required matrix products, divided by m.

    Parameters
    ----------
    m : int
        A valid key of _theta.
    p : int
        A matrix power.
    norm_info : LazyOperatorNormInfo
        Information about 1-norms of related operators.

    Returns
    -------
    cost_div_m : int
        Required number of matrix products divided by m.
    """
    ratio = norm_info.alpha(p) / _theta[m]
    return int(np.ceil(ratio))
|
482 |
+
|
483 |
+
|
484 |
+
def _compute_p_max(m_max):
|
485 |
+
"""
|
486 |
+
Compute the largest positive integer p such that p*(p-1) <= m_max + 1.
|
487 |
+
|
488 |
+
Do this in a slightly dumb way, but safe and not too slow.
|
489 |
+
|
490 |
+
Parameters
|
491 |
+
----------
|
492 |
+
m_max : int
|
493 |
+
A count related to bounds.
|
494 |
+
|
495 |
+
"""
|
496 |
+
sqrt_m_max = np.sqrt(m_max)
|
497 |
+
p_low = int(np.floor(sqrt_m_max))
|
498 |
+
p_high = int(np.ceil(sqrt_m_max + 1))
|
499 |
+
return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1)
|
500 |
+
|
501 |
+
|
502 |
+
def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):
    """
    A helper function for the _expm_multiply_* functions.

    Chooses the Taylor degree and scaling that minimize the total work.

    Parameters
    ----------
    norm_info : LazyOperatorNormInfo
        Information about norms of certain linear operators of interest.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    tol : float
        Expected to be
        :math:`2^{-24}` for single precision or
        :math:`2^{-53}` for double precision.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.

    Returns
    -------
    best_m : int
        Related to bounds for error control.
    best_s : int
        Amount of scaling.

    Notes
    -----
    This is code fragment (3.1) in Al-Mohy and Higham (2011).
    The discussion of default values for m_max and ell
    is given between the definitions of equation (3.11)
    and the definition of equation (3.12).

    """
    if ell < 1:
        raise ValueError('expected ell to be a positive integer')
    best_m = None
    best_s = None
    if _condition_3_13(norm_info.onenorm(), n0, m_max, ell):
        # The norm is small enough that the simple scaling bound
        # s = ceil(||A||_1 / theta_m) applies; minimize total work m*s.
        for m, theta in _theta.items():
            s = int(np.ceil(norm_info.onenorm() / theta))
            if best_m is None or m * s < best_m * best_s:
                best_m = m
                best_s = s
    else:
        # Equation (3.11): search over matrix powers p, using the
        # (potentially cheaper) norm estimates of powers of A.
        for p in range(2, _compute_p_max(m_max) + 1):
            for m in range(p*(p-1)-1, m_max+1):
                if m in _theta:
                    s = _compute_cost_div_m(m, p, norm_info)
                    if best_m is None or m * s < best_m * best_s:
                        best_m = m
                        best_s = s
        # At least one scaling step is always performed.
        best_s = max(best_s, 1)
    return best_m, best_s
|
558 |
+
|
559 |
+
|
560 |
+
def _condition_3_13(A_1_norm, n0, m_max, ell):
    """
    A helper function for the _expm_multiply_* functions.

    Evaluates condition (3.13) in Al-Mohy and Higham (2011): whether the
    operator's 1-norm is small enough for the simple scaling bound.

    Parameters
    ----------
    A_1_norm : float
        The precomputed 1-norm of A.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.

    Returns
    -------
    value : bool
        Indicates whether or not the condition has been met.
    """
    # The two factors of the rhs of equation (3.12).
    p_max = _compute_p_max(m_max)
    work_factor = 2 * ell * p_max * (p_max + 3)
    theta_per_column = _theta[m_max] / float(n0 * m_max)
    # Evaluate the condition (3.13).
    return A_1_norm <= work_factor * theta_per_column
|
594 |
+
|
595 |
+
|
596 |
+
def _expm_multiply_interval(A, B, start=None, stop=None, num=None,
                            endpoint=None, traceA=None, balance=False,
                            status_only=False):
    """
    Compute the action of the matrix exponential at multiple time points.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix to be multiplied by the matrix exponential of A.
    start : scalar, optional
        The starting time point of the sequence.
    stop : scalar, optional
        The end time point of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced time points, so that `stop` is excluded.
        Note that the step size changes when `endpoint` is False.
    num : int, optional
        Number of time points to use.
    traceA : scalar, optional
        Trace of `A`. If not given the trace is estimated for linear operators,
        or calculated exactly for sparse matrices. It is used to precondition
        `A`, thus an approximate trace is acceptable
    endpoint : bool, optional
        If True, `stop` is the last time point. Otherwise, it is not included.
    balance : bool
        Indicates whether or not to apply balancing.
    status_only : bool
        A flag that is set to True for some debugging and testing operations.

    Returns
    -------
    F : ndarray
        :math:`e^{t_k A} B`
    status : int
        An integer status for testing and debugging.

    Notes
    -----
    This is algorithm (5.2) in Al-Mohy and Higham (2011).

    There seems to be a typo, where line 15 of the algorithm should be
    moved to line 6.5 (between lines 6 and 7).

    """
    if balance:
        raise NotImplementedError
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    if A.shape[1] != B.shape[0]:
        raise ValueError('shapes of matrices A {} and B {} are incompatible'
                         .format(A.shape, B.shape))
    ident = _ident_like(A)
    is_linear_operator = isinstance(A, scipy.sparse.linalg.LinearOperator)
    n = A.shape[0]
    # n0 is the number of columns of B (1 for a vector); it enters the cost
    # model used to choose the Taylor degree.
    if len(B.shape) == 1:
        n0 = 1
    elif len(B.shape) == 2:
        n0 = B.shape[1]
    else:
        raise ValueError('expected B to be like a matrix or a vector')
    # Double-precision unit roundoff, used as the truncation tolerance.
    u_d = 2**-53
    tol = u_d
    if traceA is None:
        if is_linear_operator:
            warn("Trace of LinearOperator not available, it will be estimated."
                 " Provide `traceA` to ensure performance.", stacklevel=3)
        # m3=5 is a bit arbitrary choice; a more accurate trace (larger m3)
        # might speed up exponential calculation, but trace estimation is also
        # costly; an educated guess would need to consider the number of time
        # points
        traceA = traceest(A, m3=5) if is_linear_operator else _trace(A)
    mu = traceA / float(n)

    # Get the linspace samples, attempting to preserve the linspace defaults.
    linspace_kwargs = {'retstep': True}
    if num is not None:
        linspace_kwargs['num'] = num
    if endpoint is not None:
        linspace_kwargs['endpoint'] = endpoint
    samples, step = np.linspace(start, stop, **linspace_kwargs)

    # Convert the linspace output to the notation used by the publication.
    nsamples = len(samples)
    if nsamples < 2:
        raise ValueError('at least two time points are required')
    q = nsamples - 1
    h = step
    t_0 = samples[0]
    t_q = samples[q]

    # Define the output ndarray.
    # Use an ndim=3 shape, such that the last two indices
    # are the ones that may be involved in level 3 BLAS operations.
    X_shape = (nsamples,) + B.shape
    X = np.empty(X_shape, dtype=np.result_type(A.dtype, B.dtype, float))
    t = t_q - t_0
    # Shift A to be trace-free; the exp(k*h*mu) factors restore the shift
    # in the core routines below.
    A = A - mu * ident
    A_1_norm = onenormest(A) if is_linear_operator else _exact_1_norm(A)
    ell = 2
    norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
    if t*A_1_norm == 0:
        m_star, s = 0, 1
    else:
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)

    # Compute the expm action up to the initial time point.
    X[0] = _expm_multiply_simple_core(A, B, t_0, mu, m_star, s)

    # Compute the expm action at the rest of the time points.
    # The three cases correspond to algorithm (5.2): either every step fits
    # inside a single scaling step (q <= s), the steps divide evenly into
    # super-steps (q % s == 0), or a partial super-step remains.
    if q <= s:
        if status_only:
            return 0
        else:
            return _expm_multiply_interval_core_0(A, X,
                    h, mu, q, norm_info, tol, ell, n0)
    elif not (q % s):
        if status_only:
            return 1
        else:
            return _expm_multiply_interval_core_1(A, X,
                    h, mu, m_star, s, q, tol)
    elif (q % s):
        if status_only:
            return 2
        else:
            return _expm_multiply_interval_core_2(A, X,
                    h, mu, m_star, s, q, tol)
    else:
        raise Exception('internal error')
|
727 |
+
|
728 |
+
|
729 |
+
def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0):
    """
    A helper function, for the case q <= s.

    Each time step is evaluated independently by chaining the simple
    single-point algorithm from one time point to the next.
    """

    # Compute the new values of m_star and s which should be applied
    # over intervals of size t/q
    if norm_info.onenorm() == 0:
        m_star, s = 0, 1
    else:
        # Temporarily rescale the cached norms so they describe the
        # per-step operator h*A = (t/q)*A, then restore the scale.
        norm_info.set_scale(1./q)
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
        norm_info.set_scale(1)

    for k in range(q):
        X[k+1] = _expm_multiply_simple_core(A, X[k], h, mu, m_star, s)
    return X, 0
|
746 |
+
|
747 |
+
|
748 |
+
def _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol):
    """
    A helper function, for the case q > s and q % s == 0.

    Advances the solution over ``s`` super-steps of ``d = q // s`` time
    points each.  Within a super-step, the Taylor term
    ``K[p] = (h*A)^p / p! @ K[0]`` depends only on the super-step's
    starting vector ``K[0]``, so each term is computed once, cached in
    ``K``, and reused for every intermediate time point ``k`` via the
    scaling factor ``k**p``.

    Returns the filled output array ``X`` and the status code 1.
    """
    d = q // s
    input_shape = X.shape[1:]
    K_shape = (m_star + 1, ) + input_shape
    K = np.empty(K_shape, dtype=X.dtype)
    for i in range(s):
        Z = X[i*d]
        K[0] = Z
        # Index of the highest Taylor term computed so far for this
        # super-step; terms K[1..high_p] are valid and reusable across k.
        high_p = 0
        for k in range(1, d+1):
            F = K[0]
            c1 = _exact_inf_norm(F)
            for p in range(1, m_star+1):
                if p > high_p:
                    # First use of this term: compute and cache it.
                    # Previously high_p was never advanced here, so every
                    # K[p] was recomputed for each k; recording it (as
                    # _expm_multiply_interval_core_2 already does) removes
                    # redundant matrix products without changing any of
                    # the computed values.
                    K[p] = h * A.dot(K[p-1]) / float(p)
                    high_p = p
                coeff = float(pow(k, p))
                F = F + coeff * K[p]
                inf_norm_K_p_1 = _exact_inf_norm(K[p])
                c2 = coeff * inf_norm_K_p_1
                # Early termination test from Al-Mohy and Higham (2011).
                if c1 + c2 <= tol * _exact_inf_norm(F):
                    break
                c1 = c2
            # Restore the trace shift removed from A by the caller.
            X[k + i*d] = np.exp(k*h*mu) * F
    return X, 1
|
775 |
+
|
776 |
+
|
777 |
+
def _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol):
    """
    A helper function, for the case q > s and q % s > 0.

    Like _expm_multiply_interval_core_1, but the number of time points q
    is not a multiple of the super-step length d, so the last super-step
    covers only the remaining r = q - d*j points.
    """
    d = q // s
    j = q // d
    r = q - d * j
    input_shape = X.shape[1:]
    # K[p] caches the Taylor term (h*A)^p / p! applied to the super-step's
    # starting vector; all time points in a super-step share these terms.
    K_shape = (m_star + 1, ) + input_shape
    K = np.empty(K_shape, dtype=X.dtype)
    for i in range(j + 1):
        Z = X[i*d]
        K[0] = Z
        high_p = 0
        # The final super-step is shorter when q is not divisible by d.
        if i < j:
            effective_d = d
        else:
            effective_d = r
        for k in range(1, effective_d+1):
            F = K[0]
            c1 = _exact_inf_norm(F)
            for p in range(1, m_star+1):
                if p == high_p + 1:
                    # First use of this Taylor term: compute and cache it.
                    K[p] = h * A.dot(K[p-1]) / float(p)
                    high_p = p
                coeff = float(pow(k, p))
                F = F + coeff * K[p]
                inf_norm_K_p_1 = _exact_inf_norm(K[p])
                c2 = coeff * inf_norm_K_p_1
                # Early termination test from Al-Mohy and Higham (2011).
                if c1 + c2 <= tol * _exact_inf_norm(F):
                    break
                c1 = c2
            # Restore the trace shift removed from A by the caller.
            X[k + i*d] = np.exp(k*h*mu) * F
    return X, 2
|
venv/lib/python3.10/site-packages/scipy/sparse/linalg/_interface.py
ADDED
@@ -0,0 +1,896 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Abstract linear algebra library.
|
2 |
+
|
3 |
+
This module defines a class hierarchy that implements a kind of "lazy"
|
4 |
+
matrix representation, called the ``LinearOperator``. It can be used to do
|
5 |
+
linear algebra with extremely large sparse or structured matrices, without
|
6 |
+
representing those explicitly in memory. Such matrices can be added,
|
7 |
+
multiplied, transposed, etc.
|
8 |
+
|
9 |
+
As a motivating example, suppose you have a matrix where almost all of
|
10 |
+
the elements have the value one. The standard sparse matrix representation
|
11 |
+
skips the storage of zeros, but not ones. By contrast, a LinearOperator is
|
12 |
+
able to represent such matrices efficiently. First, we need a compact way to
|
13 |
+
represent an all-ones matrix::
|
14 |
+
|
15 |
+
>>> import numpy as np
|
16 |
+
>>> from scipy.sparse.linalg._interface import LinearOperator
|
17 |
+
>>> class Ones(LinearOperator):
|
18 |
+
... def __init__(self, shape):
|
19 |
+
... super().__init__(dtype=None, shape=shape)
|
20 |
+
... def _matvec(self, x):
|
21 |
+
... return np.repeat(x.sum(), self.shape[0])
|
22 |
+
|
23 |
+
Instances of this class emulate ``np.ones(shape)``, but using a constant
|
24 |
+
amount of storage, independent of ``shape``. The ``_matvec`` method specifies
|
25 |
+
how this linear operator multiplies with (operates on) a vector. We can now
|
26 |
+
add this operator to a sparse matrix that stores only offsets from one::
|
27 |
+
|
28 |
+
>>> from scipy.sparse.linalg._interface import aslinearoperator
|
29 |
+
>>> from scipy.sparse import csr_matrix
|
30 |
+
>>> offsets = csr_matrix([[1, 0, 2], [0, -1, 0], [0, 0, 3]])
|
31 |
+
>>> A = aslinearoperator(offsets) + Ones(offsets.shape)
|
32 |
+
>>> A.dot([1, 2, 3])
|
33 |
+
array([13, 4, 15])
|
34 |
+
|
35 |
+
The result is the same as that given by its dense, explicitly-stored
|
36 |
+
counterpart::
|
37 |
+
|
38 |
+
>>> (np.ones(A.shape, A.dtype) + offsets.toarray()).dot([1, 2, 3])
|
39 |
+
array([13, 4, 15])
|
40 |
+
|
41 |
+
Several algorithms in the ``scipy.sparse`` library are able to operate on
|
42 |
+
``LinearOperator`` instances.
|
43 |
+
"""
|
44 |
+
|
45 |
+
import warnings
|
46 |
+
|
47 |
+
import numpy as np
|
48 |
+
|
49 |
+
from scipy.sparse import issparse
|
50 |
+
from scipy.sparse._sputils import isshape, isintlike, asmatrix, is_pydata_spmatrix
|
51 |
+
|
52 |
+
__all__ = ['LinearOperator', 'aslinearoperator']
|
53 |
+
|
54 |
+
|
55 |
+
class LinearOperator:
    """Common interface for performing matrix vector products

    Many iterative methods (e.g. cg, gmres) do not need to know the
    individual entries of a matrix to solve a linear system A*x=b.
    Such solvers only require the computation of matrix vector
    products, A*v where v is a dense vector.  This class serves as
    an abstract interface between iterative solvers and matrix-like
    objects.

    To construct a concrete LinearOperator, either pass appropriate
    callables to the constructor of this class, or subclass it.

    A subclass must implement either one of the methods ``_matvec``
    and ``_matmat``, and the attributes/properties ``shape`` (pair of
    integers) and ``dtype`` (may be None). It may call the ``__init__``
    on this class to have these attributes validated. Implementing
    ``_matvec`` automatically implements ``_matmat`` (using a naive
    algorithm) and vice-versa.

    Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``
    to implement the Hermitian adjoint (conjugate transpose). As with
    ``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or
    ``_adjoint`` implements the other automatically. Implementing
    ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for
    backwards compatibility.

    Parameters
    ----------
    shape : tuple
        Matrix dimensions (M, N).
    matvec : callable f(v)
        Returns A * v.
    rmatvec : callable f(v)
        Returns A^H * v, where A^H is the conjugate transpose of A.
    matmat : callable f(V)
        Returns A * V, where V is a dense matrix with dimensions (N, K).
    dtype : dtype
        Data type of the matrix.
    rmatmat : callable f(V)
        Returns A^H * V, where V is a dense matrix with dimensions (M, K).

    Attributes
    ----------
    args : tuple
        For linear operators describing products etc. of other linear
        operators, the operands of the binary operation.
    ndim : int
        Number of dimensions (this is always 2)

    See Also
    --------
    aslinearoperator : Construct LinearOperators

    Notes
    -----
    The user-defined matvec() function must properly handle the case
    where v has shape (N,) as well as the (N,1) case.  The shape of
    the return type is handled internally by LinearOperator.

    LinearOperator instances can also be multiplied, added with each
    other and exponentiated, all lazily: the result of these operations
    is always a new, composite LinearOperator, that defers linear
    operations to the original operators and combines the results.

    More details regarding how to subclass a LinearOperator and several
    examples of concrete LinearOperator instances can be found in the
    external project `PyLops <https://pylops.readthedocs.io>`_.


    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import LinearOperator
    >>> def mv(v):
    ...     return np.array([2*v[0], 3*v[1]])
    ...
    >>> A = LinearOperator((2,2), matvec=mv)
    >>> A
    <2x2 _CustomLinearOperator with dtype=float64>
    >>> A.matvec(np.ones(2))
    array([ 2.,  3.])
    >>> A * np.ones(2)
    array([ 2.,  3.])

    """

    ndim = 2
    # Necessary for right matmul with numpy arrays.
    __array_ufunc__ = None

    def __new__(cls, *args, **kwargs):
        # Instantiating the base class directly acts as a factory for
        # _CustomLinearOperator (built from user-supplied callables).
        if cls is LinearOperator:
            # Operate as _CustomLinearOperator factory.
            return super().__new__(_CustomLinearOperator)
        else:
            obj = super().__new__(cls)

            # Warn when a subclass overrides neither hook; the default
            # _matvec and _matmat each defer to the other, so leaving both
            # unimplemented would recurse forever.
            if (type(obj)._matvec == LinearOperator._matvec
                    and type(obj)._matmat == LinearOperator._matmat):
                warnings.warn("LinearOperator subclass should implement"
                              " at least one of _matvec and _matmat.",
                              category=RuntimeWarning, stacklevel=2)

            return obj

    def __init__(self, dtype, shape):
        """Initialize this LinearOperator.

        To be called by subclasses. ``dtype`` may be None; ``shape`` should
        be convertible to a length-2 tuple.
        """
        if dtype is not None:
            dtype = np.dtype(dtype)

        shape = tuple(shape)
        if not isshape(shape):
            raise ValueError(f"invalid shape {shape!r} (must be 2-d)")

        self.dtype = dtype
        self.shape = shape

    def _init_dtype(self):
        """Called from subclasses at the end of the __init__ routine.
        """
        # When no dtype was provided, infer it by applying the operator
        # to a zero vector of the appropriate length.
        if self.dtype is None:
            v = np.zeros(self.shape[-1])
            self.dtype = np.asarray(self.matvec(v)).dtype

    def _matmat(self, X):
        """Default matrix-matrix multiplication handler.

        Falls back on the user-defined _matvec method, so defining that will
        define matrix multiplication (though in a very suboptimal way).
        """

        return np.hstack([self.matvec(col.reshape(-1,1)) for col in X.T])

    def _matvec(self, x):
        """Default matrix-vector multiplication handler.

        If self is a linear operator of shape (M, N), then this method will
        be called on a shape (N,) or (N, 1) ndarray, and should return a
        shape (M,) or (M, 1) ndarray.

        This default implementation falls back on _matmat, so defining that
        will define matrix-vector multiplication as well.
        """
        return self.matmat(x.reshape(-1, 1))

    def matvec(self, x):
        """Matrix-vector multiplication.

        Performs the operation y=A*x where A is an MxN linear
        operator and x is a column vector or 1-d array.

        Parameters
        ----------
        x : {matrix, ndarray}
            An array with shape (N,) or (N,1).

        Returns
        -------
        y : {matrix, ndarray}
            A matrix or ndarray with shape (M,) or (M,1) depending
            on the type and shape of the x argument.

        Notes
        -----
        This matvec wraps the user-specified matvec routine or overridden
        _matvec method to ensure that y has the correct shape and type.

        """

        x = np.asanyarray(x)

        M,N = self.shape

        if x.shape != (N,) and x.shape != (N,1):
            raise ValueError('dimension mismatch')

        y = self._matvec(x)

        # Preserve np.matrix in / np.matrix out; everything else becomes
        # a plain ndarray.
        if isinstance(x, np.matrix):
            y = asmatrix(y)
        else:
            y = np.asarray(y)

        # Normalize the result shape to match the dimensionality of x.
        if x.ndim == 1:
            y = y.reshape(M)
        elif x.ndim == 2:
            y = y.reshape(M,1)
        else:
            raise ValueError('invalid shape returned by user-defined matvec()')

        return y

    def rmatvec(self, x):
        """Adjoint matrix-vector multiplication.

        Performs the operation y = A^H * x where A is an MxN linear
        operator and x is a column vector or 1-d array.

        Parameters
        ----------
        x : {matrix, ndarray}
            An array with shape (M,) or (M,1).

        Returns
        -------
        y : {matrix, ndarray}
            A matrix or ndarray with shape (N,) or (N,1) depending
            on the type and shape of the x argument.

        Notes
        -----
        This rmatvec wraps the user-specified rmatvec routine or overridden
        _rmatvec method to ensure that y has the correct shape and type.

        """

        x = np.asanyarray(x)

        M,N = self.shape

        if x.shape != (M,) and x.shape != (M,1):
            raise ValueError('dimension mismatch')

        y = self._rmatvec(x)

        if isinstance(x, np.matrix):
            y = asmatrix(y)
        else:
            y = np.asarray(y)

        if x.ndim == 1:
            y = y.reshape(N)
        elif x.ndim == 2:
            y = y.reshape(N,1)
        else:
            raise ValueError('invalid shape returned by user-defined rmatvec()')

        return y

    def _rmatvec(self, x):
        """Default implementation of _rmatvec; defers to adjoint."""
        if type(self)._adjoint == LinearOperator._adjoint:
            # _adjoint not overridden, prevent infinite recursion
            raise NotImplementedError
        else:
            return self.H.matvec(x)

    def matmat(self, X):
        """Matrix-matrix multiplication.

        Performs the operation y=A*X where A is an MxN linear
        operator and X dense N*K matrix or ndarray.

        Parameters
        ----------
        X : {matrix, ndarray}
            An array with shape (N,K).

        Returns
        -------
        Y : {matrix, ndarray}
            A matrix or ndarray with shape (M,K) depending on
            the type of the X argument.

        Notes
        -----
        This matmat wraps any user-specified matmat routine or overridden
        _matmat method to ensure that y has the correct type.

        """
        if not (issparse(X) or is_pydata_spmatrix(X)):
            X = np.asanyarray(X)

        if X.ndim != 2:
            raise ValueError(f'expected 2-d ndarray or matrix, not {X.ndim}-d')

        if X.shape[0] != self.shape[1]:
            raise ValueError(f'dimension mismatch: {self.shape}, {X.shape}')

        try:
            Y = self._matmat(X)
        except Exception as e:
            # A failure on sparse input most likely means the user tried
            # A @ S with a raw sparse matrix; point them at the supported
            # spelling instead of leaking an obscure internal error.
            if issparse(X) or is_pydata_spmatrix(X):
                raise TypeError(
                    "Unable to multiply a LinearOperator with a sparse matrix."
                    " Wrap the matrix in aslinearoperator first."
                ) from e
            raise

        if isinstance(Y, np.matrix):
            Y = asmatrix(Y)

        return Y

    def rmatmat(self, X):
        """Adjoint matrix-matrix multiplication.

        Performs the operation y = A^H * x where A is an MxN linear
        operator and x is a column vector or 1-d array, or 2-d array.
        The default implementation defers to the adjoint.

        Parameters
        ----------
        X : {matrix, ndarray}
            A matrix or 2D array.

        Returns
        -------
        Y : {matrix, ndarray}
            A matrix or 2D array depending on the type of the input.

        Notes
        -----
        This rmatmat wraps the user-specified rmatmat routine.

        """
        if not (issparse(X) or is_pydata_spmatrix(X)):
            X = np.asanyarray(X)

        if X.ndim != 2:
            raise ValueError('expected 2-d ndarray or matrix, not %d-d'
                             % X.ndim)

        if X.shape[0] != self.shape[0]:
            raise ValueError(f'dimension mismatch: {self.shape}, {X.shape}')

        try:
            Y = self._rmatmat(X)
        except Exception as e:
            if issparse(X) or is_pydata_spmatrix(X):
                raise TypeError(
                    "Unable to multiply a LinearOperator with a sparse matrix."
                    " Wrap the matrix in aslinearoperator() first."
                ) from e
            raise

        if isinstance(Y, np.matrix):
            Y = asmatrix(Y)
        return Y

    def _rmatmat(self, X):
        """Default implementation of _rmatmat defers to rmatvec or adjoint."""
        if type(self)._adjoint == LinearOperator._adjoint:
            # No custom adjoint: apply rmatvec column by column.
            return np.hstack([self.rmatvec(col.reshape(-1, 1)) for col in X.T])
        else:
            return self.H.matmat(X)

    def __call__(self, x):
        return self*x

    def __mul__(self, x):
        return self.dot(x)

    def __truediv__(self, other):
        # Division is only defined against scalars; it scales lazily.
        if not np.isscalar(other):
            raise ValueError("Can only divide a linear operator by a scalar.")

        return _ScaledLinearOperator(self, 1.0/other)

    def dot(self, x):
        """Matrix-matrix or matrix-vector multiplication.

        Parameters
        ----------
        x : array_like
            1-d or 2-d array, representing a vector or matrix.

        Returns
        -------
        Ax : array
            1-d or 2-d array (depending on the shape of x) that represents
            the result of applying this linear operator on x.

        """
        if isinstance(x, LinearOperator):
            return _ProductLinearOperator(self, x)
        elif np.isscalar(x):
            return _ScaledLinearOperator(self, x)
        else:
            if not issparse(x) and not is_pydata_spmatrix(x):
                # Sparse matrices shouldn't be converted to numpy arrays.
                x = np.asarray(x)

            if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1:
                return self.matvec(x)
            elif x.ndim == 2:
                return self.matmat(x)
            else:
                raise ValueError('expected 1-d or 2-d array or matrix, got %r'
                                 % x)

    def __matmul__(self, other):
        if np.isscalar(other):
            raise ValueError("Scalar operands are not allowed, "
                             "use '*' instead")
        return self.__mul__(other)

    def __rmatmul__(self, other):
        if np.isscalar(other):
            raise ValueError("Scalar operands are not allowed, "
                             "use '*' instead")
        return self.__rmul__(other)

    def __rmul__(self, x):
        if np.isscalar(x):
            return _ScaledLinearOperator(self, x)
        else:
            return self._rdot(x)

    def _rdot(self, x):
        """Matrix-matrix or matrix-vector multiplication from the right.

        Parameters
        ----------
        x : array_like
            1-d or 2-d array, representing a vector or matrix.

        Returns
        -------
        xA : array
            1-d or 2-d array (depending on the shape of x) that represents
            the result of applying this linear operator on x from the right.

        Notes
        -----
        This is copied from dot to implement right multiplication.
        """
        if isinstance(x, LinearOperator):
            return _ProductLinearOperator(x, self)
        elif np.isscalar(x):
            return _ScaledLinearOperator(self, x)
        else:
            if not issparse(x) and not is_pydata_spmatrix(x):
                # Sparse matrices shouldn't be converted to numpy arrays.
                x = np.asarray(x)

            # We use transpose instead of rmatvec/rmatmat to avoid
            # unnecessary complex conjugation if possible.
            if x.ndim == 1 or x.ndim == 2 and x.shape[0] == 1:
                return self.T.matvec(x.T).T
            elif x.ndim == 2:
                return self.T.matmat(x.T).T
            else:
                raise ValueError('expected 1-d or 2-d array or matrix, got %r'
                                 % x)

    def __pow__(self, p):
        if np.isscalar(p):
            return _PowerLinearOperator(self, p)
        else:
            return NotImplemented

    def __add__(self, x):
        if isinstance(x, LinearOperator):
            return _SumLinearOperator(self, x)
        else:
            return NotImplemented

    def __neg__(self):
        return _ScaledLinearOperator(self, -1)

    def __sub__(self, x):
        return self.__add__(-x)

    def __repr__(self):
        M,N = self.shape
        if self.dtype is None:
            dt = 'unspecified dtype'
        else:
            dt = 'dtype=' + str(self.dtype)

        return '<%dx%d %s with %s>' % (M, N, self.__class__.__name__, dt)

    def adjoint(self):
        """Hermitian adjoint.

        Returns the Hermitian adjoint of self, aka the Hermitian
        conjugate or Hermitian transpose. For a complex matrix, the
        Hermitian adjoint is equal to the conjugate transpose.

        Can be abbreviated self.H instead of self.adjoint().

        Returns
        -------
        A_H : LinearOperator
            Hermitian adjoint of self.
        """
        return self._adjoint()

    H = property(adjoint)

    def transpose(self):
        """Transpose this linear operator.

        Returns a LinearOperator that represents the transpose of this one.
        Can be abbreviated self.T instead of self.transpose().
        """
        return self._transpose()

    T = property(transpose)

    def _adjoint(self):
        """Default implementation of _adjoint; defers to rmatvec."""
        return _AdjointLinearOperator(self)

    def _transpose(self):
        """ Default implementation of _transpose; defers to rmatvec + conj"""
        return _TransposedLinearOperator(self)
|
568 |
+
|
569 |
+
|
570 |
+
class _CustomLinearOperator(LinearOperator):
    """Linear operator defined in terms of user-specified operations.

    Wraps the callables passed to ``LinearOperator(...)`` (``matvec``,
    ``rmatvec``, ``matmat``, ``rmatmat``) and dispatches the abstract
    ``_matvec``/``_matmat``/``_rmatvec``/``_rmatmat`` hooks to them,
    falling back on the base-class defaults for callables that were
    not supplied.
    """

    def __init__(self, shape, matvec, rmatvec=None, matmat=None,
                 dtype=None, rmatmat=None):
        super().__init__(dtype, shape)

        self.args = ()

        self.__matvec_impl = matvec
        self.__rmatvec_impl = rmatvec
        self.__rmatmat_impl = rmatmat
        self.__matmat_impl = matmat

        # If dtype is None this infers it by applying matvec to a zero vector.
        self._init_dtype()

    def _matmat(self, X):
        if self.__matmat_impl is not None:
            return self.__matmat_impl(X)
        else:
            # No user-supplied matmat: fall back on column-by-column matvec.
            return super()._matmat(X)

    def _matvec(self, x):
        return self.__matvec_impl(x)

    def _rmatvec(self, x):
        func = self.__rmatvec_impl
        if func is None:
            raise NotImplementedError("rmatvec is not defined")
        # Call the callable fetched above instead of re-reading the
        # attribute a second time (the original re-read it redundantly).
        return func(x)

    def _rmatmat(self, X):
        if self.__rmatmat_impl is not None:
            return self.__rmatmat_impl(X)
        else:
            return super()._rmatmat(X)

    def _adjoint(self):
        # The adjoint swaps the forward and adjoint callables and
        # transposes the shape; the dtype is unchanged.
        return _CustomLinearOperator(shape=(self.shape[1], self.shape[0]),
                                     matvec=self.__rmatvec_impl,
                                     rmatvec=self.__matvec_impl,
                                     matmat=self.__rmatmat_impl,
                                     rmatmat=self.__matmat_impl,
                                     dtype=self.dtype)
|
614 |
+
|
615 |
+
|
616 |
+
class _AdjointLinearOperator(LinearOperator):
    """Adjoint of arbitrary Linear Operator

    Applying this operator is the same as applying ``A``'s adjoint
    (and vice versa), so each hook simply delegates to the opposite
    hook on the wrapped operator.
    """

    def __init__(self, A):
        # The adjoint of an M x N operator is N x M.
        shape = (A.shape[1], A.shape[0])
        super().__init__(dtype=A.dtype, shape=shape)
        self.A = A
        self.args = (A,)

    def _matvec(self, x):
        # Forward application of the adjoint == adjoint application of A.
        return self.A._rmatvec(x)

    def _rmatvec(self, x):
        # Adjoint of the adjoint is the original operator.
        return self.A._matvec(x)

    def _matmat(self, x):
        return self.A._rmatmat(x)

    def _rmatmat(self, x):
        return self.A._matmat(x)
|
636 |
+
|
637 |
+
class _TransposedLinearOperator(LinearOperator):
    """Transposition of arbitrary Linear Operator

    Implemented via the identity A.T @ x == conj(A.H @ conj(x)):
    conjugate the input, apply the (adjoint of the) wrapped operator,
    and conjugate the result.
    """

    def __init__(self, A):
        # The transpose of an M x N operator is N x M.
        shape = (A.shape[1], A.shape[0])
        super().__init__(dtype=A.dtype, shape=shape)
        self.A = A
        self.args = (A,)

    def _matvec(self, x):
        # NB. np.conj works also on sparse matrices
        return np.conj(self.A._rmatvec(np.conj(x)))

    def _rmatvec(self, x):
        return np.conj(self.A._matvec(np.conj(x)))

    def _matmat(self, x):
        # NB. np.conj works also on sparse matrices
        return np.conj(self.A._rmatmat(np.conj(x)))

    def _rmatmat(self, x):
        return np.conj(self.A._matmat(np.conj(x)))
|
659 |
+
|
660 |
+
def _get_dtype(operators, dtypes=None):
|
661 |
+
if dtypes is None:
|
662 |
+
dtypes = []
|
663 |
+
for obj in operators:
|
664 |
+
if obj is not None and hasattr(obj, 'dtype'):
|
665 |
+
dtypes.append(obj.dtype)
|
666 |
+
return np.result_type(*dtypes)
|
667 |
+
|
668 |
+
|
669 |
+
class _SumLinearOperator(LinearOperator):
    """Lazy elementwise sum ``A + B`` of two equal-shape linear operators."""

    def __init__(self, A, B):
        if not (isinstance(A, LinearOperator)
                and isinstance(B, LinearOperator)):
            raise ValueError('both operands have to be a LinearOperator')
        if A.shape != B.shape:
            raise ValueError(f'cannot add {A} and {B}: shape mismatch')
        self.args = (A, B)
        super().__init__(_get_dtype([A, B]), A.shape)

    def _matvec(self, x):
        A, B = self.args
        return A.matvec(x) + B.matvec(x)

    def _rmatvec(self, x):
        A, B = self.args
        return A.rmatvec(x) + B.rmatvec(x)

    def _rmatmat(self, x):
        A, B = self.args
        return A.rmatmat(x) + B.rmatmat(x)

    def _matmat(self, x):
        A, B = self.args
        return A.matmat(x) + B.matmat(x)

    def _adjoint(self):
        # Adjoint distributes over the sum.
        A, B = self.args
        return A.H + B.H
|
694 |
+
|
695 |
+
|
696 |
+
class _ProductLinearOperator(LinearOperator):
    """Lazy composition ``A @ B`` of two conformable linear operators."""

    def __init__(self, A, B):
        if not (isinstance(A, LinearOperator)
                and isinstance(B, LinearOperator)):
            raise ValueError('both operands have to be a LinearOperator')
        if A.shape[1] != B.shape[0]:
            raise ValueError(f'cannot multiply {A} and {B}: shape mismatch')
        super().__init__(_get_dtype([A, B]),
                         (A.shape[0], B.shape[1]))
        self.args = (A, B)

    def _matvec(self, x):
        # Apply B first, then A.
        A, B = self.args
        return A.matvec(B.matvec(x))

    def _rmatvec(self, x):
        # (A B)^H = B^H A^H, so the adjoint applies A's adjoint first.
        A, B = self.args
        return B.rmatvec(A.rmatvec(x))

    def _rmatmat(self, x):
        A, B = self.args
        return B.rmatmat(A.rmatmat(x))

    def _matmat(self, x):
        A, B = self.args
        return A.matmat(B.matmat(x))

    def _adjoint(self):
        A, B = self.args
        return B.H * A.H
|
722 |
+
|
723 |
+
|
724 |
+
class _ScaledLinearOperator(LinearOperator):
    """Lazy scalar multiple ``alpha * A`` of a linear operator."""

    def __init__(self, A, alpha):
        if not isinstance(A, LinearOperator):
            raise ValueError('LinearOperator expected as A')
        if not np.isscalar(alpha):
            raise ValueError('scalar expected as alpha')
        if isinstance(A, _ScaledLinearOperator):
            # Flatten nested scalings into a single prefactor.
            A, alpha_original = A.args
            # Avoid in-place multiplication so that we don't accidentally mutate
            # the original prefactor.
            alpha = alpha * alpha_original
        super().__init__(_get_dtype([A], [type(alpha)]), A.shape)
        self.args = (A, alpha)

    def _matvec(self, x):
        A, alpha = self.args
        return alpha * A.matvec(x)

    def _rmatvec(self, x):
        # The adjoint of alpha * A is conj(alpha) * A^H.
        A, alpha = self.args
        return np.conj(alpha) * A.rmatvec(x)

    def _rmatmat(self, x):
        A, alpha = self.args
        return np.conj(alpha) * A.rmatmat(x)

    def _matmat(self, x):
        A, alpha = self.args
        return alpha * A.matmat(x)

    def _adjoint(self):
        A, alpha = self.args
        return A.H * np.conj(alpha)
|
755 |
+
|
756 |
+
|
757 |
+
class _PowerLinearOperator(LinearOperator):
    """Lazy non-negative integer power ``A ** p`` of a square operator."""

    def __init__(self, A, p):
        if not isinstance(A, LinearOperator):
            raise ValueError('LinearOperator expected as A')
        if A.shape[0] != A.shape[1]:
            raise ValueError('square LinearOperator expected, got %r' % A)
        if not isintlike(p) or p < 0:
            raise ValueError('non-negative integer expected as p')

        super().__init__(_get_dtype([A]), A.shape)
        self.args = (A, p)

    def _power(self, fun, x):
        """Apply ``fun`` repeatedly, ``p`` times, starting from ``x``.

        The input is copied first so the result never aliases the
        caller's array (in particular when p == 0 the copy itself is
        returned).
        """
        res = np.array(x, copy=True)
        # Loop index is unused; apply the operator p times.
        for _ in range(self.args[1]):
            res = fun(res)
        return res

    def _matvec(self, x):
        return self._power(self.args[0].matvec, x)

    def _rmatvec(self, x):
        return self._power(self.args[0].rmatvec, x)

    def _rmatmat(self, x):
        return self._power(self.args[0].rmatmat, x)

    def _matmat(self, x):
        return self._power(self.args[0].matmat, x)

    def _adjoint(self):
        # (A^p)^H == (A^H)^p.
        A, p = self.args
        return A.H ** p
|
790 |
+
|
791 |
+
|
792 |
+
class MatrixLinearOperator(LinearOperator):
    """LinearOperator wrapping an explicitly stored (dense or sparse) matrix."""

    def __init__(self, A):
        super().__init__(A.dtype, A.shape)
        self.A = A
        # Cache slot for the lazily constructed adjoint operator.
        self.__adj = None
        self.args = (A,)

    def _matmat(self, X):
        # Delegate multiplication directly to the wrapped matrix.
        return self.A.dot(X)

    def _adjoint(self):
        # Build the adjoint on first request and reuse it afterwards.
        if self.__adj is None:
            self.__adj = _AdjointMatrixOperator(self)
        return self.__adj
|
806 |
+
|
807 |
+
class _AdjointMatrixOperator(MatrixLinearOperator):
    """Adjoint of a MatrixLinearOperator, wrapping the conjugate transpose.

    Note: deliberately does NOT call ``super().__init__``; it assigns
    ``A``, ``args`` and ``shape`` directly, and exposes ``dtype`` as a
    property mirroring the parent operator instead of storing it.
    """

    def __init__(self, adjoint):
        # Conjugate transpose of the wrapped matrix.
        self.A = adjoint.A.T.conj()
        self.__adjoint = adjoint
        self.args = (adjoint,)
        self.shape = adjoint.shape[1], adjoint.shape[0]

    @property
    def dtype(self):
        # Mirror the parent operator's dtype rather than keeping a copy.
        return self.__adjoint.dtype

    def _adjoint(self):
        # Adjoint of the adjoint is the original operator.
        return self.__adjoint
|
820 |
+
|
821 |
+
|
822 |
+
class IdentityOperator(LinearOperator):
    """Identity operator: every application returns its input unchanged.

    The operator is its own adjoint.
    """

    def __init__(self, shape, dtype=None):
        super().__init__(dtype, shape)

    def _matvec(self, x):
        return x

    def _rmatvec(self, x):
        return x

    def _rmatmat(self, x):
        return x

    def _matmat(self, x):
        return x

    def _adjoint(self):
        return self
|
840 |
+
|
841 |
+
|
842 |
+
def aslinearoperator(A):
    """Return A as a LinearOperator.

    'A' may be any of the following types:
     - ndarray
     - matrix
     - sparse matrix (e.g. csr_matrix, lil_matrix, etc.)
     - LinearOperator
     - An object with .shape and .matvec attributes

    See the LinearOperator documentation for additional information.

    Notes
    -----
    If 'A' has no .dtype attribute, the data type is determined by calling
    :func:`LinearOperator.matvec()` - set the .dtype attribute to prevent this
    call upon the linear operator creation.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import aslinearoperator
    >>> M = np.array([[1,2,3],[4,5,6]], dtype=np.int32)
    >>> aslinearoperator(M)
    <2x3 MatrixLinearOperator with dtype=int32>
    """
    if isinstance(A, LinearOperator):
        # Already a LinearOperator: pass through unchanged.
        return A

    elif isinstance(A, (np.ndarray, np.matrix)):
        # Single isinstance with a tuple replaces the redundant
        # `isinstance(A, np.ndarray) or isinstance(A, np.matrix)`
        # (np.matrix is an ndarray subclass).
        if A.ndim > 2:
            raise ValueError('array must have ndim <= 2')
        # Drop any matrix/subclass wrapper and promote 1-d input to 2-d.
        A = np.atleast_2d(np.asarray(A))
        return MatrixLinearOperator(A)

    elif issparse(A) or is_pydata_spmatrix(A):
        return MatrixLinearOperator(A)

    else:
        if hasattr(A, 'shape') and hasattr(A, 'matvec'):
            # Duck-typed operator: pick up the optional adjoint/dtype
            # hooks when present (getattr with a default replaces the
            # original hasattr/if chains).
            rmatvec = getattr(A, 'rmatvec', None)
            rmatmat = getattr(A, 'rmatmat', None)
            dtype = getattr(A, 'dtype', None)
            return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec,
                                  rmatmat=rmatmat, dtype=dtype)

        else:
            raise TypeError('type not understood')
|
venv/lib/python3.10/site-packages/scipy/sparse/linalg/_matfuncs.py
ADDED
@@ -0,0 +1,940 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Sparse matrix functions
|
3 |
+
"""
|
4 |
+
|
5 |
+
#
|
6 |
+
# Authors: Travis Oliphant, March 2002
|
7 |
+
# Anthony Scopatz, August 2012 (Sparse Updates)
|
8 |
+
# Jake Vanderplas, August 2012 (Sparse Updates)
|
9 |
+
#
|
10 |
+
|
11 |
+
__all__ = ['expm', 'inv', 'matrix_power']
|
12 |
+
|
13 |
+
import numpy as np
|
14 |
+
from scipy.linalg._basic import solve, solve_triangular
|
15 |
+
|
16 |
+
from scipy.sparse._base import issparse
|
17 |
+
from scipy.sparse.linalg import spsolve
|
18 |
+
from scipy.sparse._sputils import is_pydata_spmatrix, isintlike
|
19 |
+
|
20 |
+
import scipy.sparse
|
21 |
+
import scipy.sparse.linalg
|
22 |
+
from scipy.sparse.linalg._interface import LinearOperator
|
23 |
+
from scipy.sparse._construct import eye
|
24 |
+
|
25 |
+
from ._expm_multiply import _ident_like, _exact_1_norm as _onenorm
|
26 |
+
|
27 |
+
|
28 |
+
# Sentinel tag for matrices known to be upper triangular.  Passed around as
# ``structure=`` so that products can use the BLAS ``trmm`` fast path and
# solves can use ``solve_triangular``.
UPPER_TRIANGULAR = 'upper_triangular'
|
29 |
+
|
30 |
+
|
31 |
+
def inv(A):
    """
    Compute the inverse of a sparse matrix.

    Parameters
    ----------
    A : (M, M) sparse matrix
        square matrix to be inverted

    Returns
    -------
    Ainv : (M, M) sparse matrix
        inverse of `A`

    Notes
    -----
    This computes the sparse inverse of `A`. If the inverse of `A` is expected
    to be non-sparse, it will likely be faster to convert `A` to dense and use
    `scipy.linalg.inv`.

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import inv
    >>> A = csc_matrix([[1., 0.], [1., 2.]])
    >>> Ainv = inv(A)
    >>> A.dot(Ainv).toarray()
    array([[ 1.,  0.],
           [ 0.,  1.]])

    .. versionadded:: 0.12.0

    """
    # Only sparse inputs are supported; dense callers should use
    # scipy.linalg.inv instead.
    if not (scipy.sparse.issparse(A) or is_pydata_spmatrix(A)):
        raise TypeError('Input must be a sparse matrix')

    # Solving A @ X = I with the sparse direct solver yields the inverse
    # column by column, staying sparse where possible.
    identity = _ident_like(A)
    return spsolve(A, identity)
|
78 |
+
|
79 |
+
|
80 |
+
def _onenorm_matrix_power_nnm(A, p):
|
81 |
+
"""
|
82 |
+
Compute the 1-norm of a non-negative integer power of a non-negative matrix.
|
83 |
+
|
84 |
+
Parameters
|
85 |
+
----------
|
86 |
+
A : a square ndarray or matrix or sparse matrix
|
87 |
+
Input matrix with non-negative entries.
|
88 |
+
p : non-negative integer
|
89 |
+
The power to which the matrix is to be raised.
|
90 |
+
|
91 |
+
Returns
|
92 |
+
-------
|
93 |
+
out : float
|
94 |
+
The 1-norm of the matrix power p of A.
|
95 |
+
|
96 |
+
"""
|
97 |
+
# Check input
|
98 |
+
if int(p) != p or p < 0:
|
99 |
+
raise ValueError('expected non-negative integer p')
|
100 |
+
p = int(p)
|
101 |
+
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
|
102 |
+
raise ValueError('expected A to be like a square matrix')
|
103 |
+
|
104 |
+
# Explicitly make a column vector so that this works when A is a
|
105 |
+
# numpy matrix (in addition to ndarray and sparse matrix).
|
106 |
+
v = np.ones((A.shape[0], 1), dtype=float)
|
107 |
+
M = A.T
|
108 |
+
for i in range(p):
|
109 |
+
v = M.dot(v)
|
110 |
+
return np.max(v)
|
111 |
+
|
112 |
+
|
113 |
+
def _is_upper_triangular(A):
|
114 |
+
# This function could possibly be of wider interest.
|
115 |
+
if issparse(A):
|
116 |
+
lower_part = scipy.sparse.tril(A, -1)
|
117 |
+
# Check structural upper triangularity,
|
118 |
+
# then coincidental upper triangularity if needed.
|
119 |
+
return lower_part.nnz == 0 or lower_part.count_nonzero() == 0
|
120 |
+
elif is_pydata_spmatrix(A):
|
121 |
+
import sparse
|
122 |
+
lower_part = sparse.tril(A, -1)
|
123 |
+
return lower_part.nnz == 0
|
124 |
+
else:
|
125 |
+
return not np.tril(A, -1).any()
|
126 |
+
|
127 |
+
|
128 |
+
def _smart_matrix_product(A, B, alpha=None, structure=None):
|
129 |
+
"""
|
130 |
+
A matrix product that knows about sparse and structured matrices.
|
131 |
+
|
132 |
+
Parameters
|
133 |
+
----------
|
134 |
+
A : 2d ndarray
|
135 |
+
First matrix.
|
136 |
+
B : 2d ndarray
|
137 |
+
Second matrix.
|
138 |
+
alpha : float
|
139 |
+
The matrix product will be scaled by this constant.
|
140 |
+
structure : str, optional
|
141 |
+
A string describing the structure of both matrices `A` and `B`.
|
142 |
+
Only `upper_triangular` is currently supported.
|
143 |
+
|
144 |
+
Returns
|
145 |
+
-------
|
146 |
+
M : 2d ndarray
|
147 |
+
Matrix product of A and B.
|
148 |
+
|
149 |
+
"""
|
150 |
+
if len(A.shape) != 2:
|
151 |
+
raise ValueError('expected A to be a rectangular matrix')
|
152 |
+
if len(B.shape) != 2:
|
153 |
+
raise ValueError('expected B to be a rectangular matrix')
|
154 |
+
f = None
|
155 |
+
if structure == UPPER_TRIANGULAR:
|
156 |
+
if (not issparse(A) and not issparse(B)
|
157 |
+
and not is_pydata_spmatrix(A) and not is_pydata_spmatrix(B)):
|
158 |
+
f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B))
|
159 |
+
if f is not None:
|
160 |
+
if alpha is None:
|
161 |
+
alpha = 1.
|
162 |
+
out = f(alpha, A, B)
|
163 |
+
else:
|
164 |
+
if alpha is None:
|
165 |
+
out = A.dot(B)
|
166 |
+
else:
|
167 |
+
out = alpha * A.dot(B)
|
168 |
+
return out
|
169 |
+
|
170 |
+
|
171 |
+
class MatrixPowerOperator(LinearOperator):
    """Linear operator representing ``A**p`` without forming the power."""

    def __init__(self, A, p, structure=None):
        # A must be square and p non-negative; the power itself is never
        # materialized -- it is applied factor by factor.
        if A.ndim != 2 or A.shape[0] != A.shape[1]:
            raise ValueError('expected A to be like a square matrix')
        if p < 0:
            raise ValueError('expected p to be a non-negative integer')
        self._A = A
        self._p = p
        self._structure = structure
        self.dtype = A.dtype
        self.ndim = A.ndim
        self.shape = A.shape

    def _matvec(self, x):
        # Apply A repeatedly: A**p @ x == A @ (A @ (... @ x)).
        result = x
        for _ in range(self._p):
            result = self._A.dot(result)
        return result

    def _rmatvec(self, x):
        # Adjoint action: (A**p).T @ x via repeated A.T matvecs.
        transposed = self._A.T
        result = x.ravel()
        for _ in range(self._p):
            result = transposed.dot(result)
        return result

    def _matmat(self, X):
        # Matrix-matrix action, exploiting structure if declared.
        product = X
        for _ in range(self._p):
            product = _smart_matrix_product(
                self._A, product, structure=self._structure)
        return product

    @property
    def T(self):
        # NOTE(review): structure is not propagated to the transpose here,
        # mirroring the original behavior.
        return MatrixPowerOperator(self._A.T, self._p)
|
205 |
+
|
206 |
+
|
207 |
+
class ProductOperator(LinearOperator):
    """
    For now, this is limited to products of multiple square matrices.
    """

    def __init__(self, *args, **kwargs):
        self._structure = kwargs.get('structure', None)
        # Every factor must be a square 2-D matrix ...
        for factor in args:
            if len(factor.shape) != 2 or factor.shape[0] != factor.shape[1]:
                raise ValueError(
                    'For now, the ProductOperator implementation is '
                    'limited to the product of multiple square matrices.')
        # ... and all factors must share the same order n.
        if args:
            n = args[0].shape[0]
            for factor in args:
                for d in factor.shape:
                    if d != n:
                        raise ValueError(
                            'The square matrices of the ProductOperator '
                            'must all have the same shape.')
            self.shape = (n, n)
            self.ndim = len(self.shape)
        self.dtype = np.result_type(*[x.dtype for x in args])
        self._operator_sequence = args

    def _matvec(self, x):
        # (A1 @ A2 @ ... @ Ak) @ x: apply the rightmost factor first.
        out = x
        for factor in reversed(self._operator_sequence):
            out = factor.dot(out)
        return out

    def _rmatvec(self, x):
        # Adjoint: transposed factors applied left-to-right.
        out = x.ravel()
        for factor in self._operator_sequence:
            out = factor.T.dot(out)
        return out

    def _matmat(self, X):
        out = X
        for factor in reversed(self._operator_sequence):
            out = _smart_matrix_product(factor, out, structure=self._structure)
        return out

    @property
    def T(self):
        # (A1 ... Ak).T == Ak.T ... A1.T
        transposed_factors = [f.T for f in reversed(self._operator_sequence)]
        return ProductOperator(*transposed_factors)
|
252 |
+
|
253 |
+
|
254 |
+
def _onenormest_matrix_power(A, p,
|
255 |
+
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
|
256 |
+
"""
|
257 |
+
Efficiently estimate the 1-norm of A^p.
|
258 |
+
|
259 |
+
Parameters
|
260 |
+
----------
|
261 |
+
A : ndarray
|
262 |
+
Matrix whose 1-norm of a power is to be computed.
|
263 |
+
p : int
|
264 |
+
Non-negative integer power.
|
265 |
+
t : int, optional
|
266 |
+
A positive parameter controlling the tradeoff between
|
267 |
+
accuracy versus time and memory usage.
|
268 |
+
Larger values take longer and use more memory
|
269 |
+
but give more accurate output.
|
270 |
+
itmax : int, optional
|
271 |
+
Use at most this many iterations.
|
272 |
+
compute_v : bool, optional
|
273 |
+
Request a norm-maximizing linear operator input vector if True.
|
274 |
+
compute_w : bool, optional
|
275 |
+
Request a norm-maximizing linear operator output vector if True.
|
276 |
+
|
277 |
+
Returns
|
278 |
+
-------
|
279 |
+
est : float
|
280 |
+
An underestimate of the 1-norm of the sparse matrix.
|
281 |
+
v : ndarray, optional
|
282 |
+
The vector such that ||Av||_1 == est*||v||_1.
|
283 |
+
It can be thought of as an input to the linear operator
|
284 |
+
that gives an output with particularly large norm.
|
285 |
+
w : ndarray, optional
|
286 |
+
The vector Av which has relatively large 1-norm.
|
287 |
+
It can be thought of as an output of the linear operator
|
288 |
+
that is relatively large in norm compared to the input.
|
289 |
+
|
290 |
+
"""
|
291 |
+
return scipy.sparse.linalg.onenormest(
|
292 |
+
MatrixPowerOperator(A, p, structure=structure))
|
293 |
+
|
294 |
+
|
295 |
+
def _onenormest_product(operator_seq,
|
296 |
+
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
|
297 |
+
"""
|
298 |
+
Efficiently estimate the 1-norm of the matrix product of the args.
|
299 |
+
|
300 |
+
Parameters
|
301 |
+
----------
|
302 |
+
operator_seq : linear operator sequence
|
303 |
+
Matrices whose 1-norm of product is to be computed.
|
304 |
+
t : int, optional
|
305 |
+
A positive parameter controlling the tradeoff between
|
306 |
+
accuracy versus time and memory usage.
|
307 |
+
Larger values take longer and use more memory
|
308 |
+
but give more accurate output.
|
309 |
+
itmax : int, optional
|
310 |
+
Use at most this many iterations.
|
311 |
+
compute_v : bool, optional
|
312 |
+
Request a norm-maximizing linear operator input vector if True.
|
313 |
+
compute_w : bool, optional
|
314 |
+
Request a norm-maximizing linear operator output vector if True.
|
315 |
+
structure : str, optional
|
316 |
+
A string describing the structure of all operators.
|
317 |
+
Only `upper_triangular` is currently supported.
|
318 |
+
|
319 |
+
Returns
|
320 |
+
-------
|
321 |
+
est : float
|
322 |
+
An underestimate of the 1-norm of the sparse matrix.
|
323 |
+
v : ndarray, optional
|
324 |
+
The vector such that ||Av||_1 == est*||v||_1.
|
325 |
+
It can be thought of as an input to the linear operator
|
326 |
+
that gives an output with particularly large norm.
|
327 |
+
w : ndarray, optional
|
328 |
+
The vector Av which has relatively large 1-norm.
|
329 |
+
It can be thought of as an output of the linear operator
|
330 |
+
that is relatively large in norm compared to the input.
|
331 |
+
|
332 |
+
"""
|
333 |
+
return scipy.sparse.linalg.onenormest(
|
334 |
+
ProductOperator(*operator_seq, structure=structure))
|
335 |
+
|
336 |
+
|
337 |
+
class _ExpmPadeHelper:
    """
    Help lazily evaluate a matrix exponential.

    The idea is to not do more work than we need for high expm precision,
    so we lazily compute matrix powers and store or precompute
    other properties of the matrix.

    """

    def __init__(self, A, structure=None, use_exact_onenorm=False):
        """
        Initialize the object.

        Parameters
        ----------
        A : a dense or sparse square numpy matrix or ndarray
            The matrix to be exponentiated.
        structure : str, optional
            A string describing the structure of matrix `A`.
            Only `upper_triangular` is currently supported.
        use_exact_onenorm : bool, optional
            If True then only the exact one-norm of matrix powers and products
            will be used. Otherwise, the one-norm of powers and products
            may initially be estimated.
        """
        self.A = A
        # Caches for the lazily computed even powers of A.
        self._A2 = None
        self._A4 = None
        self._A6 = None
        self._A8 = None
        self._A10 = None
        # Caches for exact values of d_k = ||A**k||_1 ** (1/k).
        self._d4_exact = None
        self._d6_exact = None
        self._d8_exact = None
        self._d10_exact = None
        # Caches for estimated (onenormest-based) values of d_k.
        self._d4_approx = None
        self._d6_approx = None
        self._d8_approx = None
        self._d10_approx = None
        self.ident = _ident_like(A)
        self.structure = structure
        self.use_exact_onenorm = use_exact_onenorm

    @property
    def A2(self):
        # A**2, computed once and cached.
        if self._A2 is None:
            self._A2 = _smart_matrix_product(
                self.A, self.A, structure=self.structure)
        return self._A2

    @property
    def A4(self):
        # A**4 = (A**2)**2, cached.
        if self._A4 is None:
            self._A4 = _smart_matrix_product(
                self.A2, self.A2, structure=self.structure)
        return self._A4

    @property
    def A6(self):
        # A**6 = A**4 @ A**2, cached.
        if self._A6 is None:
            self._A6 = _smart_matrix_product(
                self.A4, self.A2, structure=self.structure)
        return self._A6

    @property
    def A8(self):
        # A**8 = A**6 @ A**2, cached.
        if self._A8 is None:
            self._A8 = _smart_matrix_product(
                self.A6, self.A2, structure=self.structure)
        return self._A8

    @property
    def A10(self):
        # A**10 = A**4 @ A**6, cached.
        if self._A10 is None:
            self._A10 = _smart_matrix_product(
                self.A4, self.A6, structure=self.structure)
        return self._A10

    @property
    def d4_tight(self):
        # Exact d_4 = ||A**4||_1 ** (1/4).
        if self._d4_exact is None:
            self._d4_exact = _onenorm(self.A4)**(1/4.)
        return self._d4_exact

    @property
    def d6_tight(self):
        # Exact d_6 = ||A**6||_1 ** (1/6).
        if self._d6_exact is None:
            self._d6_exact = _onenorm(self.A6)**(1/6.)
        return self._d6_exact

    @property
    def d8_tight(self):
        # Exact d_8 = ||A**8||_1 ** (1/8).
        if self._d8_exact is None:
            self._d8_exact = _onenorm(self.A8)**(1/8.)
        return self._d8_exact

    @property
    def d10_tight(self):
        # Exact d_10 = ||A**10||_1 ** (1/10).
        if self._d10_exact is None:
            self._d10_exact = _onenorm(self.A10)**(1/10.)
        return self._d10_exact

    @property
    def d4_loose(self):
        # d_4 via estimation, unless an exact value is already available
        # (or exactness was requested up front).
        if self.use_exact_onenorm:
            return self.d4_tight
        if self._d4_exact is not None:
            return self._d4_exact
        else:
            if self._d4_approx is None:
                self._d4_approx = _onenormest_matrix_power(self.A2, 2,
                        structure=self.structure)**(1/4.)
            return self._d4_approx

    @property
    def d6_loose(self):
        # d_6 via estimation; prefers any exact value already computed.
        if self.use_exact_onenorm:
            return self.d6_tight
        if self._d6_exact is not None:
            return self._d6_exact
        else:
            if self._d6_approx is None:
                self._d6_approx = _onenormest_matrix_power(self.A2, 3,
                        structure=self.structure)**(1/6.)
            return self._d6_approx

    @property
    def d8_loose(self):
        # d_8 via estimation; prefers any exact value already computed.
        if self.use_exact_onenorm:
            return self.d8_tight
        if self._d8_exact is not None:
            return self._d8_exact
        else:
            if self._d8_approx is None:
                self._d8_approx = _onenormest_matrix_power(self.A4, 2,
                        structure=self.structure)**(1/8.)
            return self._d8_approx

    @property
    def d10_loose(self):
        # d_10 via estimation; prefers any exact value already computed.
        if self.use_exact_onenorm:
            return self.d10_tight
        if self._d10_exact is not None:
            return self._d10_exact
        else:
            if self._d10_approx is None:
                self._d10_approx = _onenormest_product((self.A4, self.A6),
                        structure=self.structure)**(1/10.)
            return self._d10_approx

    def pade3(self):
        # Degree-3 Pade approximant numerator/denominator halves:
        # expm(A) ~= (V - U)^-1 (V + U).
        b = (120., 60., 12., 1.)
        U = _smart_matrix_product(self.A,
                b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[2]*self.A2 + b[0]*self.ident
        return U, V

    def pade5(self):
        # Degree-5 Pade approximant halves.
        b = (30240., 15120., 3360., 420., 30., 1.)
        U = _smart_matrix_product(self.A,
                b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
        return U, V

    def pade7(self):
        # Degree-7 Pade approximant halves.
        b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
        U = _smart_matrix_product(self.A,
                b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
        return U, V

    def pade9(self):
        # Degree-9 Pade approximant halves.
        b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
                2162160., 110880., 3960., 90., 1.)
        U = _smart_matrix_product(self.A,
                (b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 +
                    b[3]*self.A2 + b[1]*self.ident),
                structure=self.structure)
        V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 +
                b[2]*self.A2 + b[0]*self.ident)
        return U, V

    def pade13_scaled(self, s):
        # Degree-13 Pade approximant of the scaled matrix B = 2**-s A.
        # Powers of B are obtained by rescaling cached powers of A.
        b = (64764752532480000., 32382376266240000., 7771770303897600.,
                1187353796428800., 129060195264000., 10559470521600.,
                670442572800., 33522128640., 1323241920., 40840800., 960960.,
                16380., 182., 1.)
        B = self.A * 2**-s
        B2 = self.A2 * 2**(-2*s)
        B4 = self.A4 * 2**(-4*s)
        B6 = self.A6 * 2**(-6*s)
        U2 = _smart_matrix_product(B6,
                b[13]*B6 + b[11]*B4 + b[9]*B2,
                structure=self.structure)
        U = _smart_matrix_product(B,
                (U2 + b[7]*B6 + b[5]*B4 +
                    b[3]*B2 + b[1]*self.ident),
                structure=self.structure)
        V2 = _smart_matrix_product(B6,
                b[12]*B6 + b[10]*B4 + b[8]*B2,
                structure=self.structure)
        V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident
        return U, V
|
544 |
+
|
545 |
+
|
546 |
+
def expm(A):
    """
    Compute the matrix exponential using Pade approximation.

    Parameters
    ----------
    A : (M,M) array_like or sparse matrix
        2D Array or Matrix (sparse or dense) to be exponentiated

    Returns
    -------
    expA : (M,M) ndarray
        Matrix exponential of `A`

    Notes
    -----
    This is algorithm (6.1) which is a simplification of algorithm (5.1).

    .. versionadded:: 0.12.0

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
           "A New Scaling and Squaring Algorithm for the Matrix Exponential."
           SIAM Journal on Matrix Analysis and Applications.
           31 (3). pp. 970-989. ISSN 1095-7162

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import expm
    >>> A = csc_matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
    >>> A.toarray()
    array([[1, 0, 0],
           [0, 2, 0],
           [0, 0, 3]], dtype=int64)
    >>> Aexp = expm(A)
    >>> Aexp
    <3x3 sparse matrix of type '<class 'numpy.float64'>'
        with 3 stored elements in Compressed Sparse Column format>
    >>> Aexp.toarray()
    array([[  2.71828183,   0.        ,   0.        ],
           [  0.        ,   7.3890561 ,   0.        ],
           [  0.        ,   0.        ,  20.08553692]])
    """
    # 'auto' lets _expm choose between exact and estimated one-norms based
    # on the matrix order (exact below order 200).
    return _expm(A, use_exact_onenorm='auto')
|
592 |
+
|
593 |
+
|
594 |
+
def _expm(A, use_exact_onenorm):
    # Core of expm, separated to allow testing exact and approximate
    # algorithms.
    #
    # Implements the scaling-and-squaring algorithm of Al-Mohy & Higham
    # (2009): try Pade orders 3/5/7/9 when the one-norm bounds permit,
    # otherwise scale A by 2**-s, apply the order-13 Pade approximant,
    # and square s times.
    #
    # Parameters: A is a square dense/sparse matrix; use_exact_onenorm is
    # True, False, or the string "auto" (exact when A is of order < 200).

    # Avoid indiscriminate asarray() to allow sparse or other strange arrays.
    if isinstance(A, (list, tuple, np.matrix)):
        A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')

    # gracefully handle size-0 input,
    # carefully handling sparse scenario
    if A.shape == (0, 0):
        out = np.zeros([0, 0], dtype=A.dtype)
        if issparse(A) or is_pydata_spmatrix(A):
            return A.__class__(out)
        return out

    # Trivial case: the exponential of a 1x1 matrix is elementwise exp.
    if A.shape == (1, 1):
        out = [[np.exp(A[0, 0])]]

        # Avoid indiscriminate casting to ndarray to
        # allow for sparse or other strange arrays
        if issparse(A) or is_pydata_spmatrix(A):
            return A.__class__(out)

        return np.array(out)

    # Ensure input is of float type, to avoid integer overflows etc.
    if ((isinstance(A, np.ndarray) or issparse(A) or is_pydata_spmatrix(A))
            and not np.issubdtype(A.dtype, np.inexact)):
        A = A.astype(float)

    # Detect upper triangularity so products/solves can use faster
    # triangular routines and Code Fragment 2.1 below.
    structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None

    if use_exact_onenorm == "auto":
        # Hardcode a matrix order threshold for exact vs. estimated one-norms.
        use_exact_onenorm = A.shape[0] < 200

    # Track functions of A to help compute the matrix exponential.
    h = _ExpmPadeHelper(
        A, structure=structure, use_exact_onenorm=use_exact_onenorm)

    # Try Pade order 3.  The numeric thresholds are the theta_m constants
    # from Table 3.1 of the 2009 paper; _ell guards against a too-large
    # backward error.
    eta_1 = max(h.d4_loose, h.d6_loose)
    if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0:
        U, V = h.pade3()
        return _solve_P_Q(U, V, structure=structure)

    # Try Pade order 5.
    eta_2 = max(h.d4_tight, h.d6_loose)
    if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0:
        U, V = h.pade5()
        return _solve_P_Q(U, V, structure=structure)

    # Try Pade orders 7 and 9.
    eta_3 = max(h.d6_tight, h.d8_loose)
    if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0:
        U, V = h.pade7()
        return _solve_P_Q(U, V, structure=structure)
    if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0:
        U, V = h.pade9()
        return _solve_P_Q(U, V, structure=structure)

    # Use Pade order 13.
    eta_4 = max(h.d8_loose, h.d10_loose)
    eta_5 = min(eta_3, eta_4)
    theta_13 = 4.25

    # Choose smallest s>=0 such that 2**(-s) eta_5 <= theta_13
    if eta_5 == 0:
        # Nilpotent special case
        s = 0
    else:
        s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0)
    # _ell may bump s to control the backward error of the scaled matrix.
    s = s + _ell(2**-s * h.A, 13)
    U, V = h.pade13_scaled(s)
    X = _solve_P_Q(U, V, structure=structure)
    if structure == UPPER_TRIANGULAR:
        # Invoke Code Fragment 2.1.
        X = _fragment_2_1(X, h.A, s)
    else:
        # X = r_13(A)^(2^s) by repeated squaring.
        for i in range(s):
            X = X.dot(X)
    return X
|
682 |
+
|
683 |
+
|
684 |
+
def _solve_P_Q(U, V, structure=None):
|
685 |
+
"""
|
686 |
+
A helper function for expm_2009.
|
687 |
+
|
688 |
+
Parameters
|
689 |
+
----------
|
690 |
+
U : ndarray
|
691 |
+
Pade numerator.
|
692 |
+
V : ndarray
|
693 |
+
Pade denominator.
|
694 |
+
structure : str, optional
|
695 |
+
A string describing the structure of both matrices `U` and `V`.
|
696 |
+
Only `upper_triangular` is currently supported.
|
697 |
+
|
698 |
+
Notes
|
699 |
+
-----
|
700 |
+
The `structure` argument is inspired by similar args
|
701 |
+
for theano and cvxopt functions.
|
702 |
+
|
703 |
+
"""
|
704 |
+
P = U + V
|
705 |
+
Q = -U + V
|
706 |
+
if issparse(U) or is_pydata_spmatrix(U):
|
707 |
+
return spsolve(Q, P)
|
708 |
+
elif structure is None:
|
709 |
+
return solve(Q, P)
|
710 |
+
elif structure == UPPER_TRIANGULAR:
|
711 |
+
return solve_triangular(Q, P)
|
712 |
+
else:
|
713 |
+
raise ValueError('unsupported matrix structure: ' + str(structure))
|
714 |
+
|
715 |
+
|
716 |
+
def _exp_sinch(a, x):
|
717 |
+
"""
|
718 |
+
Stably evaluate exp(a)*sinh(x)/x
|
719 |
+
|
720 |
+
Notes
|
721 |
+
-----
|
722 |
+
The strategy of falling back to a sixth order Taylor expansion
|
723 |
+
was suggested by the Spallation Neutron Source docs
|
724 |
+
which was found on the internet by google search.
|
725 |
+
http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html
|
726 |
+
The details of the cutoff point and the Horner-like evaluation
|
727 |
+
was picked without reference to anything in particular.
|
728 |
+
|
729 |
+
Note that sinch is not currently implemented in scipy.special,
|
730 |
+
whereas the "engineer's" definition of sinc is implemented.
|
731 |
+
The implementation of sinc involves a scaling factor of pi
|
732 |
+
that distinguishes it from the "mathematician's" version of sinc.
|
733 |
+
|
734 |
+
"""
|
735 |
+
|
736 |
+
# If x is small then use sixth order Taylor expansion.
|
737 |
+
# How small is small? I am using the point where the relative error
|
738 |
+
# of the approximation is less than 1e-14.
|
739 |
+
# If x is large then directly evaluate sinh(x) / x.
|
740 |
+
if abs(x) < 0.0135:
|
741 |
+
x2 = x*x
|
742 |
+
return np.exp(a) * (1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.))))
|
743 |
+
else:
|
744 |
+
return (np.exp(a + x) - np.exp(a - x)) / (2*x)
|
745 |
+
|
746 |
+
|
747 |
+
def _eq_10_42(lam_1, lam_2, t_12):
    """
    Equation (10.42) of Functions of Matrices: Theory and Computation.

    Notes
    -----
    This is a helper function for _fragment_2_1 of expm_2009.
    Equation (10.42) is on page 251 in the section on Schur algorithms.
    In particular, section 10.4.3 explains the Schur-Parlett algorithm.
    expm([[lam_1, t_12], [0, lam_2]])
    =
    [[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)],
    [0, exp(lam_2)]]
    """

    # The plain formula t_12 * (exp(lam_2) - exp(lam_1)) / (lam_2 - lam_1)
    # apparently suffers from cancellation, according to Higham's textbook.
    # A nice implementation of sinch, defined as sinh(x)/x,
    # will apparently work around the cancellation.
    a = 0.5 * (lam_1 + lam_2)
    b = 0.5 * (lam_1 - lam_2)
    return t_12 * _exp_sinch(a, b)
|
769 |
+
|
770 |
+
|
771 |
+
def _fragment_2_1(X, T, s):
    """
    A helper function for expm_2009.

    Refines the squaring phase for an upper triangular T by replacing the
    diagonal and first superdiagonal of X with exactly computed values at
    each squaring step (Code Fragment 2.1 of the 2009 expm paper).

    Parameters
    ----------
    X : matrix
        The order-13 Pade approximant r_13(2**-s T); overwritten in place.
    T : matrix
        The original (upper triangular) matrix being exponentiated.
    s : int
        Number of squaring steps.

    Notes
    -----
    The argument X is modified in-place, but this modification is not the same
    as the returned value of the function.
    This function also takes pains to do things in ways that are compatible
    with sparse matrices, for example by avoiding fancy indexing
    and by using methods of the matrices whenever possible instead of
    using functions of the numpy or scipy libraries themselves.

    """
    # Form X = r_m(2^-s T)
    # Replace diag(X) by exp(2^-s diag(T)).
    n = X.shape[0]
    diag_T = np.ravel(T.diagonal().copy())

    # Replace diag(X) by exp(2^-s diag(T)).
    scale = 2 ** -s
    exp_diag = np.exp(scale * diag_T)
    for k in range(n):
        X[k, k] = exp_diag[k]

    # Square s times; after each squaring, patch the diagonal and
    # superdiagonal with exact values for the current scale 2^-i.
    for i in range(s-1, -1, -1):
        X = X.dot(X)

        # Replace diag(X) by exp(2^-i diag(T)).
        scale = 2 ** -i
        exp_diag = np.exp(scale * diag_T)
        for k in range(n):
            X[k, k] = exp_diag[k]

        # Replace (first) superdiagonal of X by explicit formula
        # for superdiagonal of exp(2^-i T) from Eq (10.42) of
        # the author's 2008 textbook
        # Functions of Matrices: Theory and Computation.
        for k in range(n-1):
            lam_1 = scale * diag_T[k]
            lam_2 = scale * diag_T[k+1]
            t_12 = scale * T[k, k+1]
            value = _eq_10_42(lam_1, lam_2, t_12)
            X[k, k+1] = value

    # Return the updated X matrix.
    return X
|
818 |
+
|
819 |
+
|
820 |
+
def _ell(A, m):
    """
    A helper function for expm_2009.

    Computes the extra amount of scaling needed so that the backward error
    of the order-m Pade approximant stays below unit roundoff
    (Eq. (5.1) region test of the 2009 expm paper).

    Parameters
    ----------
    A : linear operator
        A linear operator whose norm of power we care about.
    m : int
        The power of the linear operator

    Returns
    -------
    value : int
        A value related to a bound.

    """
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')

    # The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
    # They are coefficients of terms of a generating function series expansion.
    # Stored as reciprocals of |c_{2m+1}|; only the supported Pade orders
    # appear as keys.
    c_i = {3: 100800.,
           5: 10059033600.,
           7: 4487938430976000.,
           9: 5914384781877411840000.,
           13: 113250775606021113483283660800000000.
           }
    abs_c_recip = c_i[m]

    # This is explained after Eq. (1.2) of the 2009 expm paper.
    # It is the "unit roundoff" of IEEE double precision arithmetic.
    u = 2**-53

    # Compute the one-norm of matrix power p of abs(A).
    A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), 2*m + 1)

    # Treat zero norm as a special case.
    if not A_abs_onenorm:
        return 0

    # alpha measures the ratio of the truncation-error bound to the
    # norm of A; the result is the number of extra halvings needed to
    # bring alpha below u.
    alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
    log2_alpha_div_u = np.log2(alpha/u)
    value = int(np.ceil(log2_alpha_div_u / (2 * m)))
    return max(value, 0)
|
865 |
+
|
866 |
+
def matrix_power(A, power):
    """
    Raise a square matrix to the integer power, `power`.

    For non-negative integers, ``A**power`` is computed using repeated
    matrix multiplications. Negative integers are not supported.

    Parameters
    ----------
    A : (M, M) square sparse array or matrix
        sparse array that will be raised to power `power`
    power : int
        Exponent used to raise sparse array `A`

    Returns
    -------
    A**power : (M, M) sparse array or matrix
        The output matrix will be the same shape as A, and will preserve
        the class of A, but the format of the output may be changed.

    Notes
    -----
    This uses a recursive implementation of the matrix power. For computing
    the matrix power using a reasonably large `power`, this may be less
    efficient than computing the product directly, using A @ A @ ... @ A.
    This is contingent upon the number of nonzero entries in the matrix.

    .. versionadded:: 1.12.0

    Examples
    --------
    >>> from scipy import sparse
    >>> A = sparse.csc_array([[0,1,0],[1,0,1],[0,1,0]])
    >>> A2 = sparse.linalg.matrix_power(A, 2)
    >>> A2.todense()
    array([[1, 0, 1],
           [0, 2, 0],
           [1, 0, 1]])
    >>> A4 = sparse.linalg.matrix_power(A, 4)
    >>> A4.todense()
    array([[2, 0, 2],
           [0, 4, 0],
           [2, 0, 2]])

    """
    # Guard clauses: square matrix, integral non-negative exponent.
    M, N = A.shape
    if M != N:
        raise TypeError('sparse matrix is not square')
    if not isintlike(power):
        raise ValueError("exponent must be an integer")
    power = int(power)
    if power < 0:
        raise ValueError('exponent must be >= 0')

    # Base cases of the recursion.
    if power == 0:
        return eye(M, dtype=A.dtype)
    if power == 1:
        return A.copy()

    # Exponentiation by squaring: A**p = (A**(p//2))**2, times A when odd.
    half = matrix_power(A, power // 2)
    if power % 2:
        return A @ half @ half
    return half @ half
|
venv/lib/python3.10/site-packages/scipy/sparse/linalg/_norm.py
ADDED
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Sparse matrix norms.
|
2 |
+
|
3 |
+
"""
|
4 |
+
import numpy as np
|
5 |
+
from scipy.sparse import issparse
|
6 |
+
from scipy.sparse.linalg import svds
|
7 |
+
import scipy.sparse as sp
|
8 |
+
|
9 |
+
from numpy import sqrt, abs
|
10 |
+
|
11 |
+
__all__ = ['norm']
|
12 |
+
|
13 |
+
|
14 |
+
def _sparse_frobenius_norm(x):
|
15 |
+
data = sp._sputils._todata(x)
|
16 |
+
return np.linalg.norm(data)
|
17 |
+
|
18 |
+
|
19 |
+
def norm(x, ord=None, axis=None):
    """
    Norm of a sparse matrix

    This function is able to return one of seven different matrix norms,
    depending on the value of the ``ord`` parameter.

    Parameters
    ----------
    x : a sparse matrix
        Input sparse matrix.
    ord : {non-zero int, inf, -inf, 'fro'}, optional
        Order of the norm (see table under ``Notes``). inf means numpy's
        `inf` object.
    axis : {int, 2-tuple of ints, None}, optional
        If `axis` is an integer, it specifies the axis of `x` along which to
        compute the vector norms. If `axis` is a 2-tuple, it specifies the
        axes that hold 2-D matrices, and the matrix norms of these matrices
        are computed. If `axis` is None then either a vector norm (when `x`
        is 1-D) or a matrix norm (when `x` is 2-D) is returned.

    Returns
    -------
    n : float or ndarray

    Notes
    -----
    Some of the ord are not implemented because some associated functions like,
    _multi_svd_norm, are not yet available for sparse matrix.

    This docstring is modified based on numpy.linalg.norm.
    https://github.com/numpy/numpy/blob/main/numpy/linalg/linalg.py

    The following norms can be calculated:

    =====  ============================
    ord    norm for sparse matrices
    =====  ============================
    None   Frobenius norm
    'fro'  Frobenius norm
    inf    max(sum(abs(x), axis=1))
    -inf   min(sum(abs(x), axis=1))
    0      abs(x).sum(axis=axis)
    1      max(sum(abs(x), axis=0))
    -1     min(sum(abs(x), axis=0))
    2      Spectral norm (the largest singular value)
    -2     Not implemented
    other  Not implemented
    =====  ============================

    The Frobenius norm is given by [1]_:

        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`

    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
        Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15

    Examples
    --------
    >>> from scipy.sparse import *
    >>> import numpy as np
    >>> from scipy.sparse.linalg import norm
    >>> a = np.arange(9) - 4
    >>> a
    array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
    >>> b = a.reshape((3, 3))
    >>> b
    array([[-4, -3, -2],
           [-1, 0, 1],
           [ 2, 3, 4]])

    >>> b = csr_matrix(b)
    >>> norm(b)
    7.745966692414834
    >>> norm(b, 'fro')
    7.745966692414834
    >>> norm(b, np.inf)
    9
    >>> norm(b, -np.inf)
    2
    >>> norm(b, 1)
    7
    >>> norm(b, -1)
    6

    The matrix 2-norm or the spectral norm is the largest singular
    value, computed approximately and with limitations.

    >>> b = diags([-1, 1], [0, 1], shape=(9, 10))
    >>> norm(b, 2)
    1.9753...
    """
    if not issparse(x):
        raise TypeError("input is not sparse. use numpy.linalg.norm")

    # Check the default case first and handle it immediately.
    # This avoids the CSR conversion below for the common Frobenius case.
    if axis is None and ord in (None, 'fro', 'f'):
        return _sparse_frobenius_norm(x)

    # Some norms require functions that are not implemented for all types.
    x = x.tocsr()

    if axis is None:
        axis = (0, 1)
    elif not isinstance(axis, tuple):
        # A single integer axis is normalized to a 1-tuple; any value that
        # is not an exact integer (e.g. 1.5, a string) is rejected.
        msg = "'axis' must be None, an integer or a tuple of integers"
        try:
            int_axis = int(axis)
        except TypeError as e:
            raise TypeError(msg) from e
        if axis != int_axis:
            raise TypeError(msg)
        axis = (int_axis,)

    # The input is 2-D after tocsr(), so valid axis values lie in [-2, 2).
    nd = 2
    if len(axis) == 2:
        # Matrix norms: both axes must be distinct (modulo nd) and in range.
        row_axis, col_axis = axis
        if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
            message = f'Invalid axis {axis!r} for an array with shape {x.shape!r}'
            raise ValueError(message)
        if row_axis % nd == col_axis % nd:
            raise ValueError('Duplicate axes given.')
        if ord == 2:
            # Only solver="lobpcg" supports all numpy dtypes
            _, s, _ = svds(x, k=1, solver="lobpcg")
            return s[0]
        elif ord == -2:
            raise NotImplementedError
            #return _multi_svd_norm(x, row_axis, col_axis, amin)
        elif ord == 1:
            # Maximum absolute column sum; [0,0] extracts the scalar from
            # the 1x1 result of the chained sparse reductions.
            return abs(x).sum(axis=row_axis).max(axis=col_axis)[0,0]
        elif ord == np.inf:
            # Maximum absolute row sum.
            return abs(x).sum(axis=col_axis).max(axis=row_axis)[0,0]
        elif ord == -1:
            # Minimum absolute column sum.
            return abs(x).sum(axis=row_axis).min(axis=col_axis)[0,0]
        elif ord == -np.inf:
            # Minimum absolute row sum.
            return abs(x).sum(axis=col_axis).min(axis=row_axis)[0,0]
        elif ord in (None, 'f', 'fro'):
            # The axis order does not matter for this norm.
            return _sparse_frobenius_norm(x)
        else:
            raise ValueError("Invalid norm order for matrices.")
    elif len(axis) == 1:
        # Vector norms along a single axis; the result is a 1-D ndarray.
        a, = axis
        if not (-nd <= a < nd):
            message = f'Invalid axis {axis!r} for an array with shape {x.shape!r}'
            raise ValueError(message)
        if ord == np.inf:
            M = abs(x).max(axis=a)
        elif ord == -np.inf:
            M = abs(x).min(axis=a)
        elif ord == 0:
            # Zero norm
            M = (x != 0).sum(axis=a)
        elif ord == 1:
            # special case for speedup
            M = abs(x).sum(axis=a)
        elif ord in (2, None):
            M = sqrt(abs(x).power(2).sum(axis=a))
        else:
            # General p-norm; `ord + 1` is a cheap probe that rejects
            # non-numeric orders before doing any work.
            try:
                ord + 1
            except TypeError as e:
                raise ValueError('Invalid norm order for vectors.') from e
            M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord)
        # Normalize the reduction result to a flat ndarray regardless of
        # whether it came back as a sparse object, a matrix, or an array.
        if hasattr(M, 'toarray'):
            return M.toarray().ravel()
        elif hasattr(M, 'A'):
            return M.A.ravel()
        else:
            return M.ravel()
    else:
        raise ValueError("Improper number of dimensions to norm.")
|
venv/lib/python3.10/site-packages/scipy/sparse/linalg/_onenormest.py
ADDED
@@ -0,0 +1,467 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Sparse block 1-norm estimator.
|
2 |
+
"""
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
from scipy.sparse.linalg import aslinearoperator
|
6 |
+
|
7 |
+
|
8 |
+
__all__ = ['onenormest']
|
9 |
+
|
10 |
+
|
11 |
+
def onenormest(A, t=2, itmax=5, compute_v=False, compute_w=False):
    """
    Compute a lower bound of the 1-norm of a sparse matrix.

    Parameters
    ----------
    A : ndarray or other linear operator
        A linear operator that can be transposed and that can
        produce matrix products.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.

    Notes
    -----
    This is algorithm 2.4 of [1].

    In [2] it is described as follows.
    "This algorithm typically requires the evaluation of
    about 4t matrix-vector products and almost invariably
    produces a norm estimate (which is, in fact, a lower
    bound on the norm) correct to within a factor 3."

    .. versionadded:: 0.13.0

    References
    ----------
    .. [1] Nicholas J. Higham and Francoise Tisseur (2000),
           "A Block Algorithm for Matrix 1-Norm Estimation,
           with an Application to 1-Norm Pseudospectra."
           SIAM J. Matrix Anal. Appl. Vol. 21, No. 4, pp. 1185-1201.

    .. [2] Awad H. Al-Mohy and Nicholas J. Higham (2009),
           "A new scaling and squaring algorithm for the matrix exponential."
           SIAM J. Matrix Anal. Appl. Vol. 31, No. 3, pp. 970-989.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import onenormest
    >>> A = csc_matrix([[1., 0., 0.], [5., 8., 2.], [0., -1., 0.]], dtype=float)
    >>> A.toarray()
    array([[ 1.,  0.,  0.],
           [ 5.,  8.,  2.],
           [ 0., -1.,  0.]])
    >>> onenormest(A)
    9.0
    >>> np.linalg.norm(A.toarray(), ord=1)
    9.0
    """
    # Validate the input operator.
    op = aslinearoperator(A)
    if op.shape[0] != op.shape[1]:
        raise ValueError('expected the operator to act like a square matrix')

    n = op.shape[1]
    if t >= n:
        # The operator is small relative to the block size t, so the exact
        # 1-norm is cheap: materialize the dense matrix and take the
        # maximum absolute column sum directly.
        dense = np.asarray(op.matmat(np.identity(n)))
        if dense.shape != (n, n):
            raise Exception('internal error: ',
                    'unexpected shape ' + str(dense.shape))
        col_abs_sums = abs(dense).sum(axis=0)
        if col_abs_sums.shape != (n, ):
            raise Exception('internal error: ',
                    'unexpected shape ' + str(col_abs_sums.shape))
        j_max = np.argmax(col_abs_sums)
        est = col_abs_sums[j_max]
        v = elementary_vector(n, j_max)
        w = dense[:, j_max]
    else:
        # Otherwise run the Higham-Tisseur block estimator.
        est, v, w, _nmults, _nresamples = _onenormest_core(op, op.H, t, itmax)

    # Report the norm estimate, optionally with its certificate vectors.
    if not (compute_v or compute_w):
        return est
    pieces = [est]
    if compute_v:
        pieces.append(v)
    if compute_w:
        pieces.append(w)
    return tuple(pieces)
|
119 |
+
|
120 |
+
|
121 |
+
def _blocked_elementwise(func):
|
122 |
+
"""
|
123 |
+
Decorator for an elementwise function, to apply it blockwise along
|
124 |
+
first dimension, to avoid excessive memory usage in temporaries.
|
125 |
+
"""
|
126 |
+
block_size = 2**20
|
127 |
+
|
128 |
+
def wrapper(x):
|
129 |
+
if x.shape[0] < block_size:
|
130 |
+
return func(x)
|
131 |
+
else:
|
132 |
+
y0 = func(x[:block_size])
|
133 |
+
y = np.zeros((x.shape[0],) + y0.shape[1:], dtype=y0.dtype)
|
134 |
+
y[:block_size] = y0
|
135 |
+
del y0
|
136 |
+
for j in range(block_size, x.shape[0], block_size):
|
137 |
+
y[j:j+block_size] = func(x[j:j+block_size])
|
138 |
+
return y
|
139 |
+
return wrapper
|
140 |
+
|
141 |
+
|
142 |
+
@_blocked_elementwise
def sign_round_up(X):
    """
    Elementwise sign with the convention sign(0) == 1.

    From Higham and Tisseur:
    "Everything in this section remains valid for complex matrices
    provided that sign(A) is redefined as the matrix (aij / |aij|)
    (and sign(0) = 1) transposes are replaced by conjugate transposes."

    So for complex input each nonzero entry is scaled to unit modulus,
    and zeros become 1 before the scaling.
    """
    result = X.copy()
    result[result == 0] = 1
    result /= np.abs(result)
    return result
|
157 |
+
|
158 |
+
|
159 |
+
@_blocked_elementwise
def _max_abs_axis1(X):
    """Row-wise maximum absolute value of a 2-D block."""
    return np.abs(X).max(axis=1)
|
162 |
+
|
163 |
+
|
164 |
+
def _sum_abs_axis0(X):
|
165 |
+
block_size = 2**20
|
166 |
+
r = None
|
167 |
+
for j in range(0, X.shape[0], block_size):
|
168 |
+
y = np.sum(np.abs(X[j:j+block_size]), axis=0)
|
169 |
+
if r is None:
|
170 |
+
r = y
|
171 |
+
else:
|
172 |
+
r += y
|
173 |
+
return r
|
174 |
+
|
175 |
+
|
176 |
+
def elementary_vector(n, i):
    """Return the length-`n` float unit basis vector e_i."""
    e = np.zeros(n, dtype=float)
    e[i] = 1
    return e
|
180 |
+
|
181 |
+
|
182 |
+
def vectors_are_parallel(v, w):
    """Return True when the dot product of v and w attains its maximum n.

    Entries are required to be in {-1, 1}, which guarantees that the
    magnitudes of the vectors are identical, so the dot product equals
    n exactly when the vectors coincide entrywise.
    """
    if v.ndim != 1 or v.shape != w.shape:
        raise ValueError('expected conformant vectors with entries in {-1,1}')
    return np.dot(v, w) == v.shape[0]
|
190 |
+
|
191 |
+
|
192 |
+
def every_col_of_X_is_parallel_to_a_col_of_Y(X, Y):
    """Return True iff each column of X matches some column of Y,
    in the sense of `vectors_are_parallel`."""
    return all(
        any(vectors_are_parallel(col, w) for w in Y.T)
        for col in X.T
    )
|
197 |
+
|
198 |
+
|
199 |
+
def column_needs_resampling(i, X, Y=None):
    """Return True when column i of X should be re-randomized.

    Column i needs resampling if it is parallel to an earlier column of X,
    or (when Y is given) to any column of Y.
    """
    n, t = X.shape  # X is an n-by-t block of sign vectors
    col = X[:, i]
    if any(vectors_are_parallel(col, X[:, j]) for j in range(i)):
        return True
    return Y is not None and any(vectors_are_parallel(col, w) for w in Y.T)
|
211 |
+
|
212 |
+
|
213 |
+
def resample_column(i, X):
    """Overwrite column i of X in place with random entries from {-1, 1}."""
    X[:, i] = 2 * np.random.randint(0, 2, size=X.shape[0]) - 1
|
215 |
+
|
216 |
+
|
217 |
+
def less_than_or_close(a, b):
    """Return True when ``a < b`` or `a` is within `np.allclose`
    tolerance of `b` (a tolerant ``<=``)."""
    if np.allclose(a, b):
        return True
    return a < b
|
219 |
+
|
220 |
+
|
221 |
+
def _algorithm_2_2(A, AT, t):
    """
    This is Algorithm 2.2 of Higham and Tisseur (2000).

    Parameters
    ----------
    A : ndarray or other linear operator
        A linear operator that can produce matrix products.
    AT : ndarray or other linear operator
        The transpose of A.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.

    Returns
    -------
    g : sequence
        A non-negative decreasing vector
        such that g[j] is a lower bound for the 1-norm
        of the column of A of jth largest 1-norm.
        The first entry of this vector is therefore a lower bound
        on the 1-norm of the linear operator A.
        This sequence has length t.
    ind : sequence
        The ith entry of ind is the index of the column A whose 1-norm
        is given by g[i].
        This sequence of indices has length t, and its entries are
        chosen from range(n), possibly with repetition,
        where n is the order of the operator A.

    Notes
    -----
    This algorithm is mainly for testing.
    It uses the 'ind' array in a way that is similar to
    its usage in algorithm 2.4. This algorithm 2.2 may be easier to test,
    so it gives a chance of uncovering bugs related to indexing
    which could have propagated less noticeably to algorithm 2.4.

    """
    A_linear_operator = aslinearoperator(A)
    AT_linear_operator = aslinearoperator(AT)
    n = A_linear_operator.shape[0]

    # Initialize the X block with columns of unit 1-norm.
    # The first column is all-ones; the rest are random {-1, 1} vectors.
    X = np.ones((n, t))
    if t > 1:
        X[:, 1:] = np.random.randint(0, 2, size=(n, t-1))*2 - 1
    X /= float(n)

    # Iteratively improve the lower bounds.
    # Track extra things, to assert invariants for debugging.
    g_prev = None
    h_prev = None
    k = 1
    ind = range(t)
    while True:
        # g holds the 1-norms of the columns of A*X, sorted descending.
        Y = np.asarray(A_linear_operator.matmat(X))
        g = _sum_abs_axis0(Y)
        best_j = np.argmax(g)
        g.sort()
        g = g[::-1]
        # h is the row-wise max-abs of A^T * sign(Y); its largest entries
        # point to the columns of A most likely to have a large 1-norm.
        S = sign_round_up(Y)
        Z = np.asarray(AT_linear_operator.matmat(S))
        h = _max_abs_axis1(Z)

        # If this algorithm runs for fewer than two iterations,
        # then its return values do not have the properties indicated
        # in the description of the algorithm.
        # In particular, the entries of g are not 1-norms of any
        # column of A until the second iteration.
        # Therefore we will require the algorithm to run for at least
        # two iterations, even though this requirement is not stated
        # in the description of the algorithm.
        if k >= 2:
            # Convergence test: no row of Z promises improvement over the
            # current best column estimate.
            if less_than_or_close(max(h), np.dot(Z[:, best_j], X[:, best_j])):
                break
        # Select the t most promising column indices and probe them with
        # elementary vectors on the next iteration.
        ind = np.argsort(h)[::-1][:t]
        h = h[ind]
        for j in range(t):
            X[:, j] = elementary_vector(n, ind[j])

        # Check invariant (2.2).
        if k >= 2:
            if not less_than_or_close(g_prev[0], h_prev[0]):
                raise Exception('invariant (2.2) is violated')
            if not less_than_or_close(h_prev[0], g[0]):
                raise Exception('invariant (2.2) is violated')

        # Check invariant (2.3).
        if k >= 3:
            for j in range(t):
                if not less_than_or_close(g[j], g_prev[j]):
                    raise Exception('invariant (2.3) is violated')

        # Update for the next iteration.
        g_prev = g
        h_prev = h
        k += 1

    # Return the lower bounds and the corresponding column indices.
    return g, ind
|
322 |
+
|
323 |
+
|
324 |
+
def _onenormest_core(A, AT, t, itmax):
    """
    Compute a lower bound of the 1-norm of a sparse matrix.

    Parameters
    ----------
    A : ndarray or other linear operator
        A linear operator that can produce matrix products.
    AT : ndarray or other linear operator
        The transpose of A.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
    itmax : int, optional
        Use at most this many iterations.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.
    nmults : int, optional
        The number of matrix products that were computed.
    nresamples : int, optional
        The number of times a parallel column was observed,
        necessitating a re-randomization of the column.

    Notes
    -----
    This is algorithm 2.4.

    """
    # This function is a more or less direct translation
    # of Algorithm 2.4 from the Higham and Tisseur (2000) paper.
    A_linear_operator = aslinearoperator(A)
    AT_linear_operator = aslinearoperator(AT)
    if itmax < 2:
        raise ValueError('at least two iterations are required')
    if t < 1:
        raise ValueError('at least one column is required')
    n = A.shape[0]
    if t >= n:
        raise ValueError('t should be smaller than the order of A')
    # Track the number of big*small matrix multiplications
    # and the number of resamplings.
    nmults = 0
    nresamples = 0
    # "We now explain our choice of starting matrix.  We take the first
    # column of X to be the vector of 1s [...] This has the advantage that
    # for a matrix with nonnegative elements the algorithm converges
    # with an exact estimate on the second iteration, and such matrices
    # arise in applications [...]"
    X = np.ones((n, t), dtype=float)
    # "The remaining columns are chosen as rand{-1,1},
    # with a check for and correction of parallel columns,
    # exactly as for S in the body of the algorithm."
    if t > 1:
        for i in range(1, t):
            # These are technically initial samples, not resamples,
            # so the resampling count is not incremented.
            resample_column(i, X)
        for i in range(t):
            while column_needs_resampling(i, X):
                resample_column(i, X)
                nresamples += 1
    # "Choose starting matrix X with columns of unit 1-norm."
    X /= float(n)
    # "indices of used unit vectors e_j"
    ind_hist = np.zeros(0, dtype=np.intp)
    est_old = 0
    S = np.zeros((n, t), dtype=float)
    k = 1
    ind = None
    while True:
        # One block product with A; column 1-norms of Y are the candidate
        # estimates for this iteration.
        Y = np.asarray(A_linear_operator.matmat(X))
        nmults += 1
        mags = _sum_abs_axis0(Y)
        est = np.max(mags)
        best_j = np.argmax(mags)
        # Record the certificate (best column index and output vector)
        # whenever the estimate improves; k == 2 is the first iteration
        # at which `ind` indexes actual columns of A.
        if est > est_old or k == 2:
            if k >= 2:
                ind_best = ind[best_j]
            w = Y[:, best_j]
        # (1)
        if k >= 2 and est <= est_old:
            # No improvement: keep the previous estimate and stop.
            est = est_old
            break
        est_old = est
        S_old = S
        if k > itmax:
            break
        S = sign_round_up(Y)
        del Y
        # (2)
        if every_col_of_X_is_parallel_to_a_col_of_Y(S, S_old):
            break
        if t > 1:
            # "Ensure that no column of S is parallel to another column of S
            # or to a column of S_old by replacing columns of S by rand{-1,1}."
            for i in range(t):
                while column_needs_resampling(i, S, S_old):
                    resample_column(i, S)
                    nresamples += 1
        del S_old
        # (3)
        # One block product with A^T; large entries of h flag columns of A
        # that are promising to probe next.
        Z = np.asarray(AT_linear_operator.matmat(S))
        nmults += 1
        h = _max_abs_axis1(Z)
        del Z
        # (4)
        if k >= 2 and max(h) == h[ind_best]:
            break
        # "Sort h so that h_first >= ... >= h_last
        # and re-order ind correspondingly."
        #
        # Later on, we will need at most t+len(ind_hist) largest
        # entries, so drop the rest
        ind = np.argsort(h)[::-1][:t+len(ind_hist)].copy()
        del h
        if t > 1:
            # (5)
            # Break if the most promising t vectors have been visited already.
            if np.isin(ind[:t], ind_hist).all():
                break
            # Put the most promising unvisited vectors at the front of the list
            # and put the visited vectors at the end of the list.
            # Preserve the order of the indices induced by the ordering of h.
            seen = np.isin(ind, ind_hist)
            ind = np.concatenate((ind[~seen], ind[seen]))
        # Probe the selected columns with elementary vectors next round.
        for j in range(t):
            X[:, j] = elementary_vector(n, ind[j])

        new_ind = ind[:t][~np.isin(ind[:t], ind_hist)]
        ind_hist = np.concatenate((ind_hist, new_ind))
        k += 1
    v = elementary_vector(n, ind_best)
    return est, v, w, nmults, nresamples
|
venv/lib/python3.10/site-packages/scipy/sparse/linalg/_special_sparse_arrays.py
ADDED
@@ -0,0 +1,948 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from scipy.sparse.linalg import LinearOperator
|
3 |
+
from scipy.sparse import kron, eye, dia_array
|
4 |
+
|
5 |
+
__all__ = ['LaplacianNd']
|
6 |
+
# Sakurai and Mikota classes are intended for tests and benchmarks
|
7 |
+
# and explicitly not included in the public API of this module.
|
8 |
+
|
9 |
+
|
10 |
+
class LaplacianNd(LinearOperator):
    """
    The grid Laplacian in ``N`` dimensions and its eigenvalues/eigenvectors.

    Construct Laplacian on a uniform rectangular grid in `N` dimensions
    and output its eigenvalues and eigenvectors.
    The Laplacian ``L`` is square, negative definite, real symmetric array
    with signed integer entries and zeros otherwise.

    Parameters
    ----------
    grid_shape : tuple
        A tuple of integers of length ``N`` (corresponding to the dimension of
        the Laplacian), where each entry gives the size of that dimension. The
        Laplacian matrix is square of the size ``np.prod(grid_shape)``.
    boundary_conditions : {'neumann', 'dirichlet', 'periodic'}, optional
        The type of the boundary conditions on the boundaries of the grid.
        Valid values are ``'dirichlet'`` or ``'neumann'`` (default) or
        ``'periodic'``.
    dtype : dtype
        Numerical type of the array. Default is ``np.int8``.

    Methods
    -------
    toarray()
        Construct a dense array from Laplacian data
    tosparse()
        Construct a sparse array from Laplacian data
    eigenvalues(m=None)
        Construct a 1D array of `m` largest (smallest in absolute value)
        eigenvalues of the Laplacian matrix in ascending order.
    eigenvectors(m=None)
        Construct the array with columns made of `m` eigenvectors (``float``)
        of the ``Nd`` Laplacian corresponding to the `m` ordered eigenvalues.

    .. versionadded:: 1.12.0

    Notes
    -----
    Compared to the MATLAB/Octave implementation [1] of 1-, 2-, and 3-D
    Laplacian, this code allows the arbitrary N-D case and the matrix-free
    callable option, but is currently limited to pure Dirichlet, Neumann or
    Periodic boundary conditions only.

    The Laplacian matrix of a graph (`scipy.sparse.csgraph.laplacian`) of a
    rectangular grid corresponds to the negative Laplacian with the Neumann
    conditions, i.e., ``boundary_conditions = 'neumann'``.

    All eigenvalues and eigenvectors of the discrete Laplacian operator for
    an ``N``-dimensional regular grid of shape `grid_shape` with the grid
    step size ``h=1`` are analytically known [2].

    References
    ----------
    .. [1] https://github.com/lobpcg/blopex/blob/master/blopex_\
           tools/matlab/laplacian/laplacian.m
    .. [2] "Eigenvalues and eigenvectors of the second derivative", Wikipedia
           https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors_\
           of_the_second_derivative

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import LaplacianNd
    >>> lap = LaplacianNd((6, ), boundary_conditions='neumann')
    >>> lap.toarray()
    array([[-1,  1,  0,  0,  0,  0],
           [ 1, -2,  1,  0,  0,  0],
           [ 0,  1, -2,  1,  0,  0],
           [ 0,  0,  1, -2,  1,  0],
           [ 0,  0,  0,  1, -2,  1],
           [ 0,  0,  0,  0,  1, -1]], dtype=int8)
    >>> np.array_equal(lap.matmat(np.eye(6)), lap.toarray())
    True
    >>> np.array_equal(lap.tosparse().toarray(), lap.toarray())
    True
    >>> lap.eigenvalues(2)
    array([-0.26794919,  0.        ])
    """

    def __init__(self, grid_shape, *,
                 boundary_conditions='neumann',
                 dtype=np.int8):

        if boundary_conditions not in ('dirichlet', 'neumann', 'periodic'):
            raise ValueError(
                f"Unknown value {boundary_conditions!r} is given for "
                "'boundary_conditions' parameter. The valid options are "
                "'dirichlet', 'periodic', and 'neumann' (default)."
            )

        self.grid_shape = grid_shape
        self.boundary_conditions = boundary_conditions
        # LaplacianNd folds all dimensions in `grid_shape` into a single one
        N = np.prod(grid_shape)
        super().__init__(dtype=dtype, shape=(N, N))

    def _grid_shape_min(self, m):
        """Clamp every dimension of `grid_shape` at `m`, elementwise.

        Only the ``min(n_i, m)`` top 1D mode indices in each direction can
        contribute to the `m` overall largest eigenvalues, so the candidate
        grid may be truncated accordingly.
        """
        # BUGFIX: the elementwise minimum is required here. The previous
        # lexicographic tuple comparison ``min(grid_shape, (m, ..., m))``
        # produced an invalid truncated shape (mode indices ``j >= n_i``)
        # whenever a leading dimension exceeded `m` while a later dimension
        # was smaller than `m`, yielding spurious/duplicate eigenvalues.
        return tuple(min(int(n), m) for n in self.grid_shape)

    def _eigenvalue_ordering(self, m):
        """Compute `m` largest eigenvalues in each of the ``N`` directions,
        i.e., up to ``m * N`` total, order them and return `m` largest.
        """
        grid_shape = self.grid_shape
        if m is None:
            indices = np.indices(grid_shape)
            Leig = np.zeros(grid_shape)
        else:
            grid_shape_min = self._grid_shape_min(m)
            indices = np.indices(grid_shape_min)
            Leig = np.zeros(grid_shape_min)

        # Accumulate the analytically known 1D eigenvalues over directions;
        # the Nd eigenvalue is the sum of its 1D components.
        for j, n in zip(indices, grid_shape):
            if self.boundary_conditions == 'dirichlet':
                Leig += -4 * np.sin(np.pi * (j + 1) / (2 * (n + 1))) ** 2
            elif self.boundary_conditions == 'neumann':
                Leig += -4 * np.sin(np.pi * j / (2 * n)) ** 2
            else:  # boundary_conditions == 'periodic'
                Leig += -4 * np.sin(np.pi * np.floor((j + 1) / 2) / n) ** 2

        Leig_ravel = Leig.ravel()
        ind = np.argsort(Leig_ravel)
        eigenvalues = Leig_ravel[ind]
        if m is not None:
            eigenvalues = eigenvalues[-m:]
            ind = ind[-m:]

        return eigenvalues, ind

    def eigenvalues(self, m=None):
        """Return the requested number of eigenvalues.

        Parameters
        ----------
        m : int, optional
            The positive number of smallest eigenvalues to return.
            If not provided, then all eigenvalues will be returned.

        Returns
        -------
        eigenvalues : float array
            The requested `m` smallest or all eigenvalues, in ascending order.
        """
        eigenvalues, _ = self._eigenvalue_ordering(m)
        return eigenvalues

    def _ev1d(self, j, n):
        """Return 1 eigenvector in 1d with index `j`
        and number of grid points `n` where ``j < n``.
        """
        if self.boundary_conditions == 'dirichlet':
            i = np.pi * (np.arange(n) + 1) / (n + 1)
            ev = np.sqrt(2. / (n + 1.)) * np.sin(i * (j + 1))
        elif self.boundary_conditions == 'neumann':
            i = np.pi * (np.arange(n) + 0.5) / n
            ev = np.sqrt((1. if j == 0 else 2.) / n) * np.cos(i * j)
        else:  # boundary_conditions == 'periodic'
            if j == 0:
                ev = np.sqrt(1. / n) * np.ones(n)
            elif j + 1 == n and n % 2 == 0:
                ev = np.sqrt(1. / n) * np.tile([1, -1], n//2)
            else:
                i = 2. * np.pi * (np.arange(n) + 0.5) / n
                ev = np.sqrt(2. / n) * np.cos(i * np.floor((j + 1) / 2))
        # make small values exact zeros correcting round-off errors
        # due to symmetry of eigenvectors the exact 0. is correct
        ev[np.abs(ev) < np.finfo(np.float64).eps] = 0.
        return ev

    def _one_eve(self, k):
        """Return 1 eigenvector in Nd with multi-index `k`
        as a tensor product of the corresponding 1d eigenvectors.
        """
        phi = [self._ev1d(j, n) for j, n in zip(k, self.grid_shape)]
        result = phi[0]
        for phi1 in phi[1:]:
            result = np.tensordot(result, phi1, axes=0)
        return np.asarray(result).ravel()

    def eigenvectors(self, m=None):
        """Return the requested number of eigenvectors for ordered eigenvalues.

        Parameters
        ----------
        m : int, optional
            The positive number of eigenvectors to return. If not provided,
            then all eigenvectors will be returned.

        Returns
        -------
        eigenvectors : float array
            An array with columns made of the requested `m` or all eigenvectors.
            The columns are ordered according to the `m` ordered eigenvalues.
        """
        _, ind = self._eigenvalue_ordering(m)
        if m is None:
            grid_shape_min = self.grid_shape
        else:
            # must match the truncated shape used in `_eigenvalue_ordering`
            grid_shape_min = self._grid_shape_min(m)

        N_indices = np.unravel_index(ind, grid_shape_min)
        N_indices = [tuple(x) for x in zip(*N_indices)]
        eigenvectors_list = [self._one_eve(k) for k in N_indices]
        return np.column_stack(eigenvectors_list)

    def toarray(self):
        """
        Converts the Laplacian data to a dense array.

        Returns
        -------
        L : ndarray
            The shape is ``(N, N)`` where ``N = np.prod(grid_shape)``.

        """
        grid_shape = self.grid_shape
        n = np.prod(grid_shape)
        L = np.zeros([n, n], dtype=np.int8)
        # Scratch arrays
        L_i = np.empty_like(L)
        Ltemp = np.empty_like(L)

        for ind, dim in enumerate(grid_shape):
            # Start zeroing out L_i
            L_i[:] = 0
            # Allocate the top left corner with the kernel of L_i
            # Einsum returns writable view of arrays
            np.einsum("ii->i", L_i[:dim, :dim])[:] = -2
            np.einsum("ii->i", L_i[: dim - 1, 1:dim])[:] = 1
            np.einsum("ii->i", L_i[1:dim, : dim - 1])[:] = 1

            if self.boundary_conditions == 'neumann':
                L_i[0, 0] = -1
                L_i[dim - 1, dim - 1] = -1
            elif self.boundary_conditions == 'periodic':
                if dim > 1:
                    L_i[0, dim - 1] += 1
                    L_i[dim - 1, 0] += 1
                else:
                    L_i[0, 0] += 1

            # kron is too slow for large matrices hence the next two tricks
            # 1- kron(eye, mat) is block_diag(mat, mat, ...)
            # 2- kron(mat, eye) can be performed by 4d stride trick

            # 1-
            new_dim = dim
            # for block_diag we tile the top left portion on the diagonal
            if ind > 0:
                tiles = np.prod(grid_shape[:ind])
                for j in range(1, tiles):
                    L_i[j*dim:(j+1)*dim, j*dim:(j+1)*dim] = L_i[:dim, :dim]
                    new_dim += dim
            # 2-
            # we need to keep L_i, but reset the array
            Ltemp[:new_dim, :new_dim] = L_i[:new_dim, :new_dim]
            tiles = int(np.prod(grid_shape[ind+1:]))
            # Zero out the top left, the rest is already 0
            L_i[:new_dim, :new_dim] = 0
            idx = list(range(tiles))
            L_i.reshape(
                (new_dim, tiles,
                 new_dim, tiles)
            )[:, idx, :, idx] = Ltemp[:new_dim, :new_dim]

            L += L_i

        return L.astype(self.dtype)

    def tosparse(self):
        """
        Constructs a sparse array from the Laplacian data. The returned sparse
        array format is dependent on the selected boundary conditions.

        Returns
        -------
        L : scipy.sparse.sparray
            The shape is ``(N, N)`` where ``N = np.prod(grid_shape)``.

        """
        N = len(self.grid_shape)
        p = np.prod(self.grid_shape)
        L = dia_array((p, p), dtype=np.int8)

        for i in range(N):
            dim = self.grid_shape[i]
            # 1D tri-diagonal kernel: 1 on the off-diagonals, -2 on the main
            data = np.ones([3, dim], dtype=np.int8)
            data[1, :] *= -2

            if self.boundary_conditions == 'neumann':
                data[1, 0] = -1
                data[1, -1] = -1

            L_i = dia_array((data, [-1, 0, 1]), shape=(dim, dim),
                            dtype=np.int8
                            )

            if self.boundary_conditions == 'periodic':
                # wrap-around connections in the corners
                t = dia_array((dim, dim), dtype=np.int8)
                t.setdiag([1], k=-dim+1)
                t.setdiag([1], k=dim-1)
                L_i += t

            # lift the 1D kernel to Nd by Kronecker products with identities
            for j in range(i):
                L_i = kron(eye(self.grid_shape[j], dtype=np.int8), L_i)
            for j in range(i + 1, N):
                L_i = kron(L_i, eye(self.grid_shape[j], dtype=np.int8))
            L += L_i
        return L.astype(self.dtype)

    def _matvec(self, x):
        grid_shape = self.grid_shape
        N = len(grid_shape)
        X = x.reshape(grid_shape + (-1,))
        Y = -2 * N * X
        for i in range(N):
            Y += np.roll(X, 1, axis=i)
            Y += np.roll(X, -1, axis=i)
            if self.boundary_conditions in ('neumann', 'dirichlet'):
                # cancel the wrap-around contributions of `np.roll` on the
                # two boundary slices of axis `i`
                Y[(slice(None),)*i + (0,) + (slice(None),)*(N-i-1)
                  ] -= np.roll(X, 1, axis=i)[
                    (slice(None),) * i + (0,) + (slice(None),) * (N-i-1)
                ]
                Y[
                    (slice(None),) * i + (-1,) + (slice(None),) * (N-i-1)
                ] -= np.roll(X, -1, axis=i)[
                    (slice(None),) * i + (-1,) + (slice(None),) * (N-i-1)
                ]

                if self.boundary_conditions == 'neumann':
                    # boundary diagonal entries are -1 instead of -2
                    Y[
                        (slice(None),) * i + (0,) + (slice(None),) * (N-i-1)
                    ] += np.roll(X, 0, axis=i)[
                        (slice(None),) * i + (0,) + (slice(None),) * (N-i-1)
                    ]
                    Y[
                        (slice(None),) * i + (-1,) + (slice(None),) * (N-i-1)
                    ] += np.roll(X, 0, axis=i)[
                        (slice(None),) * i + (-1,) + (slice(None),) * (N-i-1)
                    ]

        return Y.reshape(-1, X.shape[-1])

    def _matmat(self, x):
        # `_matvec` already handles 2D inputs column-wise
        return self._matvec(x)

    def _adjoint(self):
        # real symmetric, hence self-adjoint
        return self

    def _transpose(self):
        return self
|
516 |
+
|
517 |
+
|
518 |
+
class Sakurai(LinearOperator):
    """
    Construct a Sakurai matrix in various formats and its eigenvalues.

    Constructs the "Sakurai" matrix motivated by reference [1]_:
    square real symmetric positive definite and 5-diagonal
    with the main diagonal ``[5, 6, 6, ..., 6, 6, 5]``, the ``+1`` and ``-1``
    diagonals filled with ``-4``, and the ``+2`` and ``-2`` diagonals
    made of ``1``. Its eigenvalues are analytically known to be
    ``16. * np.power(np.cos(0.5 * k * np.pi / (n + 1)), 4)``.
    The matrix gets ill-conditioned with its size growing.
    It is useful for testing and benchmarking sparse eigenvalue solvers
    especially those taking advantage of its banded 5-diagonal structure.
    See the notes below for details.

    Parameters
    ----------
    n : int
        The size of the matrix.
    dtype : dtype
        Numerical type of the array. Default is ``np.int8``.

    Methods
    -------
    toarray()
        Construct a dense array from Sakurai data
    tosparse()
        Construct a sparse array from Sakurai data
    tobanded()
        The Sakurai matrix in the format for banded symmetric matrices,
        i.e., (3, n) ndarray with 3 upper diagonals
        placing the main diagonal at the bottom.
    eigenvalues
        All eigenvalues of the Sakurai matrix ordered ascending.

    Notes
    -----
    Reference [1]_ introduces a generalized eigenproblem for the matrix pair
    `A` and `B` where `A` is the identity so we turn it into an eigenproblem
    just for the matrix `B` that this function outputs in various formats
    together with its eigenvalues.

    .. versionadded:: 1.12.0

    References
    ----------
    .. [1] T. Sakurai, H. Tadano, Y. Inadomi, and U. Nagashima,
           "A moment-based method for large-scale generalized
           eigenvalue problems",
           Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004).

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg._special_sparse_arrays import Sakurai
    >>> from scipy.linalg import eig_banded
    >>> n = 6
    >>> sak = Sakurai(n)

    Since all matrix entries are small integers, ``'int8'`` is
    the default dtype for storing matrix representations.

    >>> sak.toarray()
    array([[ 5, -4,  1,  0,  0,  0],
           [-4,  6, -4,  1,  0,  0],
           [ 1, -4,  6, -4,  1,  0],
           [ 0,  1, -4,  6, -4,  1],
           [ 0,  0,  1, -4,  6, -4],
           [ 0,  0,  0,  1, -4,  5]], dtype=int8)
    >>> sak.tobanded()
    array([[ 1,  1,  1,  1,  1,  1],
           [-4, -4, -4, -4, -4, -4],
           [ 5,  6,  6,  6,  6,  5]], dtype=int8)
    >>> sak.tosparse()
    <6x6 sparse matrix of type '<class 'numpy.int8'>'
        with 24 stored elements (5 diagonals) in DIAgonal format>
    >>> np.array_equal(sak.dot(np.eye(n)), sak.tosparse().toarray())
    True
    >>> sak.eigenvalues()
    array([0.03922866, 0.56703972, 2.41789479, 5.97822974,
           10.54287655, 14.45473055])
    >>> sak.eigenvalues(2)
    array([0.03922866, 0.56703972])

    The banded form can be used in scipy functions for banded matrices, e.g.,

    >>> e = eig_banded(sak.tobanded(), eigvals_only=True)
    >>> np.allclose(sak.eigenvalues(), e, atol=n * n * n * np.finfo(float).eps)
    True

    """
    def __init__(self, n, dtype=np.int8):
        self.n = n
        self.dtype = dtype
        shape = (n, n)
        super().__init__(dtype, shape)

    def eigenvalues(self, m=None):
        """Return the requested number of eigenvalues.

        Parameters
        ----------
        m : int, optional
            The positive number of smallest eigenvalues to return.
            If not provided, then all eigenvalues will be returned.

        Returns
        -------
        eigenvalues : `np.float64` array
            The requested `m` smallest or all eigenvalues, in ascending order.
        """
        if m is None:
            m = self.n
        # the largest indices `k` give the smallest analytic eigenvalues,
        # hence the flip to ascending order
        k = np.arange(self.n + 1 - m, self.n + 1)
        return np.flip(16. * np.power(np.cos(0.5 * k * np.pi / (self.n + 1)), 4))

    def tobanded(self):
        """
        Construct the Sakurai matrix as a banded array.
        """
        d0 = np.r_[5, 6 * np.ones(self.n - 2, dtype=self.dtype), 5]
        d1 = -4 * np.ones(self.n, dtype=self.dtype)
        d2 = np.ones(self.n, dtype=self.dtype)
        return np.array([d2, d1, d0]).astype(self.dtype)

    def tosparse(self):
        """
        Construct the Sakurai matrix in a sparse format.
        """
        from scipy.sparse import spdiags
        d = self.tobanded()
        # the banded format has the main diagonal at the bottom
        # `spdiags` has no `dtype` parameter so inherits dtype from banded
        return spdiags([d[0], d[1], d[2], d[1], d[0]], [-2, -1, 0, 1, 2],
                       self.n, self.n)

    def toarray(self):
        """Construct the Sakurai matrix as a dense array."""
        return self.tosparse().toarray()

    def _matvec(self, x):
        """
        Construct matrix-free callable banded-matrix-vector multiplication by
        the Sakurai matrix without constructing or storing the matrix itself
        using the knowledge of its entries and the 5-diagonal format.
        """
        x = x.reshape(self.n, -1)
        result_dtype = np.promote_types(x.dtype, self.dtype)
        sx = np.zeros_like(x, dtype=result_dtype)
        # boundary rows have a 3-term stencil
        sx[0, :] = 5 * x[0, :] - 4 * x[1, :] + x[2, :]
        sx[-1, :] = 5 * x[-1, :] - 4 * x[-2, :] + x[-3, :]
        # interior rows: full 5-term stencil, zero-padded at the ends
        sx[1: -1, :] = (6 * x[1: -1, :] - 4 * (x[:-2, :] + x[2:, :])
                        + np.pad(x[:-3, :], ((1, 0), (0, 0)))
                        + np.pad(x[3:, :], ((0, 1), (0, 0))))
        return sx

    def _matmat(self, x):
        """
        Construct matrix-free callable matrix-matrix multiplication by
        the Sakurai matrix without constructing or storing the matrix itself
        by reusing the ``_matvec(x)`` that supports both 1D and 2D arrays ``x``.
        """
        return self._matvec(x)

    def _adjoint(self):
        # real symmetric, hence self-adjoint
        return self

    def _transpose(self):
        return self
|
686 |
+
|
687 |
+
|
688 |
+
class MikotaM(LinearOperator):
    """
    Construct a mass matrix in various formats of Mikota pair.

    The mass matrix `M` is square real diagonal
    positive definite with entries that are reciprocal to integers.

    Parameters
    ----------
    shape : tuple of int
        The shape of the matrix.
    dtype : dtype
        Numerical type of the array. Default is ``np.float64``.

    Methods
    -------
    toarray()
        Construct a dense array from Mikota data
    tosparse()
        Construct a sparse array from Mikota data
    tobanded()
        The format for banded symmetric matrices,
        i.e., (1, n) ndarray with the main diagonal.
    """
    def __init__(self, shape, dtype=np.float64):
        self.shape = shape
        self.dtype = dtype
        super().__init__(dtype, shape)

    def _diag(self):
        # Diagonal entries are 1/1, 1/2, ..., 1/n; recomputed on demand
        # rather than stored, to keep the operator object lightweight.
        n = self.shape[0]
        return np.reciprocal(np.arange(1, n + 1, dtype=np.float64)).astype(self.dtype)

    def tobanded(self):
        """Return the matrix in the banded symmetric format: its diagonal."""
        return self._diag()

    def tosparse(self):
        """Return the matrix as a sparse diagonal array."""
        from scipy.sparse import diags
        return diags([self._diag()], [0], shape=self.shape, dtype=self.dtype)

    def toarray(self):
        """Return the matrix as a dense diagonal array."""
        return np.diag(self._diag()).astype(self.dtype)

    def _matvec(self, x):
        # Matrix-free product: scale each row of `x` by the corresponding
        # diagonal entry; `x` may be 1D or 2D.
        columns = x.reshape(self.shape[0], -1)
        return self._diag()[:, np.newaxis] * columns

    def _matmat(self, x):
        # `_matvec` already handles 2D inputs column-wise.
        return self._matvec(x)

    def _adjoint(self):
        # real diagonal, hence self-adjoint
        return self

    def _transpose(self):
        return self
|
754 |
+
|
755 |
+
|
756 |
+
class MikotaK(LinearOperator):
    """
    Construct a stiffness matrix in various formats of Mikota pair.

    The stiffness matrix `K` is square real tri-diagonal symmetric
    positive definite with integer entries.

    Parameters
    ----------
    shape : tuple of int
        The shape of the matrix.
    dtype : dtype
        Numerical type of the array. Default is ``np.int32``.

    Methods
    -------
    toarray()
        Construct a dense array from Mikota data
    tosparse()
        Construct a sparse array from Mikota data
    tobanded()
        The format for banded symmetric matrices,
        i.e., (2, n) ndarray with 2 upper diagonals
        placing the main diagonal at the bottom.
    """
    def __init__(self, shape, dtype=np.int32):
        self.shape = shape
        self.dtype = dtype
        super().__init__(dtype, shape)
        # Precompute the two distinct diagonals once:
        # main diagonal 2n-1, 2n-3, ..., 1 and
        # off diagonal -(n-1), -(n-2), ..., -1.
        n = shape[0]
        self._diag0 = np.arange(2 * n - 1, 0, -2, dtype=self.dtype)
        self._diag1 = np.arange(1 - n, 0, dtype=self.dtype)

    def tobanded(self):
        """Return the matrix in the banded symmetric format."""
        # the off diagonal is prepended with a zero to align with the main one
        return np.vstack((np.insert(self._diag1, 0, 0), self._diag0))

    def tosparse(self):
        """Return the matrix as a sparse tri-diagonal array."""
        from scipy.sparse import diags
        return diags([self._diag1, self._diag0, self._diag1], [-1, 0, 1],
                     shape=self.shape, dtype=self.dtype)

    def toarray(self):
        """Return the matrix as a dense array."""
        return self.tosparse().toarray()

    def _matvec(self, x):
        # Matrix-free tri-diagonal product: the two boundary rows are
        # handled explicitly; interior rows are computed vectorized.
        cols = x.reshape(self.shape[0], -1)
        out_dtype = np.promote_types(cols.dtype, self.dtype)
        out = np.zeros_like(cols, dtype=out_dtype)
        off = self._diag1
        main = self._diag0
        out[0, :] = main[0] * cols[0, :] + off[0] * cols[1, :]
        out[-1, :] = off[-1] * cols[-2, :] + main[-1] * cols[-1, :]
        out[1:-1, :] = (off[:-1, None] * cols[:-2, :]
                        + main[1:-1, None] * cols[1:-1, :]
                        + off[1:, None] * cols[2:, :])
        return out

    def _matmat(self, x):
        # `_matvec` already handles 2D inputs column-wise.
        return self._matvec(x)

    def _adjoint(self):
        # real symmetric, hence self-adjoint
        return self

    def _transpose(self):
        return self
|
833 |
+
|
834 |
+
|
835 |
+
class MikotaPair:
    """The Mikota pair of matrices and the exact eigenvalues of the
    generalized eigenproblem defined by them.

    The pair models a linear mass-spring chain with both ends attached,
    where stiffnesses and masses vary along the chain so that the
    vibration frequencies are the consecutive integers ``1, 2, ..., n``
    (``n`` is the number of masses).  The generalized eigenvalues of the
    pair ``(K, M)`` — ``K`` the stiffness matrix, ``M`` the mass
    matrix — are therefore the squared integers ``1, 4, 9, ..., n * n``.

    The stiffness matrix ``K`` is square real tridiagonal symmetric
    positive definite.  The mass matrix ``M`` is diagonal with entries
    ``1, 1/2, 1/3, ..., 1/n``.  Both matrices become ill-conditioned
    as ``n`` grows.

    Parameters
    ----------
    n : int
        The size of the matrices of the Mikota pair.
    dtype : dtype
        Numerical type of the array. Default is ``np.float64``.

    Attributes
    ----------
    k : MikotaK
        `LinearOperator` custom object for the stiffness matrix.
    m : MikotaM
        `LinearOperator` custom object for the mass matrix.

    .. versionadded:: 1.12.0

    References
    ----------
    .. [1] J. Mikota, "Frequency tuning of chain structure multibody
           oscillators to place the natural frequencies at omega1 and
           N-1 integer multiples omega2,..., omegaN",
           Z. Angew. Math. Mech. 81 (2001), S2, S201-S202.
           Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004).
    .. [2] Peter C. Muller and Metin Gurgoze,
           "Natural frequencies of a multi-degree-of-freedom
           vibration system",
           Proc. Appl. Math. Mech. 6, 319-320 (2006).
           http://dx.doi.org/10.1002/pamm.200610141.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg._special_sparse_arrays import MikotaPair
    >>> mik = MikotaPair(6)
    >>> mik.eigenvalues()
    array([ 1,  4,  9, 16, 25, 36], dtype=uint64)
    >>> mik.eigenvalues(2)
    array([1, 4], dtype=uint64)
    """
    def __init__(self, n, dtype=np.float64):
        self.n = n
        self.dtype = dtype
        self.shape = (n, n)
        # The two operators of the pair, exposed as attributes.
        self.m = MikotaM(self.shape, self.dtype)
        self.k = MikotaK(self.shape, self.dtype)

    def eigenvalues(self, m=None):
        """Return the requested number of eigenvalues.

        Parameters
        ----------
        m : int, optional
            The positive number of smallest eigenvalues to return.
            If not provided, then all eigenvalues will be returned.

        Returns
        -------
        eigenvalues : `np.uint64` array
            The requested `m` smallest or all eigenvalues, in
            ascending order.
        """
        count = self.n if m is None else m
        # Eigenvalues are exactly the squares 1, 4, ..., count**2.
        roots = np.arange(1, count + 1, dtype=np.uint64)
        return np.square(roots)
|
venv/lib/python3.10/site-packages/scipy/sparse/linalg/dsolve.py
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse.linalg` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


# Public names re-exported (with a deprecation notice) from the private
# `scipy.sparse.linalg._dsolve` implementation module.
__all__ = [  # noqa: F822
    'MatrixRankWarning', 'SuperLU', 'factorized',
    'spilu', 'splu', 'spsolve',
    'spsolve_triangular', 'use_solver', 'linsolve', 'test'
]

# Submodules of the old public `dsolve` namespace.
dsolve_modules = ['linsolve']


def __dir__():
    # Limit introspection/tab-completion to the deprecated public names.
    return __all__


def __getattr__(name):
    # PEP 562 module-level __getattr__: lazily resolve `name` from the
    # private `_dsolve` module, emitting a deprecation notice on access.
    return _sub_module_deprecation(sub_package="sparse.linalg", module="dsolve",
                                   private_modules=["_dsolve"], all=__all__,
                                   attribute=name)
|
venv/lib/python3.10/site-packages/scipy/sparse/linalg/eigen.py
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
2 |
+
# Use the `scipy.sparse.linalg` namespace for importing the functions
|
3 |
+
# included below.
|
4 |
+
|
5 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
6 |
+
|
7 |
+
|
8 |
+
__all__ = [ # noqa: F822
|
9 |
+
'ArpackError', 'ArpackNoConvergence', 'ArpackError',
|
10 |
+
'eigs', 'eigsh', 'lobpcg', 'svds', 'arpack', 'test'
|
11 |
+
]
|
12 |
+
|
13 |
+
eigen_modules = ['arpack']
|
14 |
+
|
15 |
+
|
16 |
+
def __dir__():
|
17 |
+
return __all__
|
18 |
+
|
19 |
+
|
20 |
+
def __getattr__(name):
|
21 |
+
return _sub_module_deprecation(sub_package="sparse.linalg", module="eigen",
|
22 |
+
private_modules=["_eigen"], all=__all__,
|
23 |
+
attribute=name)
|
venv/lib/python3.10/site-packages/scipy/sparse/linalg/interface.py
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse.linalg` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


# Public names re-exported (with a deprecation notice) from the private
# `scipy.sparse.linalg._interface` implementation module.
__all__ = [  # noqa: F822
    'LinearOperator', 'aslinearoperator',
    'isshape', 'isintlike', 'asmatrix',
    'is_pydata_spmatrix', 'MatrixLinearOperator', 'IdentityOperator'
]


def __dir__():
    # Limit introspection/tab-completion to the deprecated public names.
    return __all__


def __getattr__(name):
    # PEP 562 module-level __getattr__: lazily resolve `name` from the
    # private `_interface` module, emitting a deprecation notice on access.
    return _sub_module_deprecation(sub_package="sparse.linalg", module="interface",
                                   private_modules=["_interface"], all=__all__,
                                   attribute=name)
|
venv/lib/python3.10/site-packages/scipy/sparse/linalg/isolve.py
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse.linalg` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


# Public names re-exported (with a deprecation notice) from the private
# `scipy.sparse.linalg._isolve` implementation module.
__all__ = [  # noqa: F822
    'bicg', 'bicgstab', 'cg', 'cgs', 'gcrotmk', 'gmres',
    'lgmres', 'lsmr', 'lsqr',
    'minres', 'qmr', 'tfqmr', 'utils', 'iterative', 'test'
]


def __dir__():
    # Limit introspection/tab-completion to the deprecated public names.
    return __all__


def __getattr__(name):
    # PEP 562 module-level __getattr__: lazily resolve `name` from the
    # private `_isolve` module, emitting a deprecation notice on access.
    return _sub_module_deprecation(sub_package="sparse.linalg", module="isolve",
                                   private_modules=["_isolve"], all=__all__,
                                   attribute=name)
|
venv/lib/python3.10/site-packages/scipy/sparse/linalg/matfuncs.py
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse.linalg` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


# Public names re-exported (with a deprecation notice) from the private
# `scipy.sparse.linalg._matfuncs` implementation module.
__all__ = [  # noqa: F822
    'expm', 'inv', 'solve', 'solve_triangular',
    'spsolve', 'is_pydata_spmatrix', 'LinearOperator',
    'UPPER_TRIANGULAR', 'MatrixPowerOperator', 'ProductOperator'
]


def __dir__():
    # Limit introspection/tab-completion to the deprecated public names.
    return __all__


def __getattr__(name):
    # PEP 562 module-level __getattr__: lazily resolve `name` from the
    # private `_matfuncs` module, emitting a deprecation notice on access.
    return _sub_module_deprecation(sub_package="sparse.linalg", module="matfuncs",
                                   private_modules=["_matfuncs"], all=__all__,
                                   attribute=name)
|
venv/lib/python3.10/site-packages/scipy/sparse/tests/__init__.py
ADDED
File without changes
|
venv/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (186 Bytes). View file
|
|