diff --git a/ckpts/universal/global_step40/zero/11.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/11.input_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6807017d940b54b380dd5fecaaa9c2ddb7a5886b
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/11.input_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f744b2dee30f8590d57d433571c451f71516cfc0d8dda4aeb56cdb49f8ab3e69
+size 9293
diff --git a/ckpts/universal/global_step40/zero/5.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/5.mlp.dense_h_to_4h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cfc172664e94dfd7ff4f6fae2e5ebd3736946d91
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/5.mlp.dense_h_to_4h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a33f07f55c8a1ab36bf4ff987be5711b50931635a3de9d5197d6759952d70b87
+size 33555612
diff --git a/ckpts/universal/global_step40/zero/5.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/5.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c36005362ad22c3d7267e411f6ea7ef0a52729da
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/5.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cba805c3aad0e844d7f976deb1110bd57596a4c1fbc043a118bcb534d97dea9
+size 33555627
diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/__init__.py b/venv/lib/python3.10/site-packages/scipy/cluster/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c975bacc61311a4dd020dc9baaedba3befe319ce
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/cluster/__init__.py
@@ -0,0 +1,31 @@
+"""
+=========================================
+Clustering package (:mod:`scipy.cluster`)
+=========================================
+
+.. currentmodule:: scipy.cluster
+
+.. toctree::
+   :hidden:
+
+   cluster.vq
+   cluster.hierarchy
+
+Clustering algorithms are useful in information theory, target detection,
+communications, compression, and other areas. The `vq` module only
+supports vector quantization and the k-means algorithms.
+
+The `hierarchy` module provides functions for hierarchical and
+agglomerative clustering. Its features include generating hierarchical
+clusters from distance matrices,
+calculating statistics on clusters, cutting linkages
+to generate flat clusters, and visualizing clusters with dendrograms.
+
+"""
+__all__ = ['vq', 'hierarchy']
+
+from . import vq, hierarchy
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..2c26033c53e222410c7809efade512ef1346cb31
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..c5117f66d4551253475ddde28e49f6f169d8fbb9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..ed59337d4cf218405bd5c30219f86fcc937cde95
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/hierarchy.py b/venv/lib/python3.10/site-packages/scipy/cluster/hierarchy.py
new file mode 100644
index 0000000000000000000000000000000000000000..6bdafcc7d57650e05020d2a6784b98b827da4f18
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/cluster/hierarchy.py
@@ -0,0 +1,4173 @@
+"""
+Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
+========================================================
+
+.. currentmodule:: scipy.cluster.hierarchy
+
+These functions cut hierarchical clusterings into flat clusterings
+or find the roots of the forest formed by a cut by providing the flat
+cluster ids of each observation.
+
+.. autosummary::
+   :toctree: generated/
+
+   fcluster
+   fclusterdata
+   leaders
+
+These are routines for agglomerative clustering.
+
+.. autosummary::
+   :toctree: generated/
+
+   linkage
+   single
+   complete
+   average
+   weighted
+   centroid
+   median
+   ward
+
+These routines compute statistics on hierarchies.
+
+.. autosummary::
+   :toctree: generated/
+
+   cophenet
+   from_mlab_linkage
+   inconsistent
+   maxinconsts
+   maxdists
+   maxRstat
+   to_mlab_linkage
+
+Routines for visualizing flat clusters.
+
+.. autosummary::
+   :toctree: generated/
+
+   dendrogram
+
+These are data structures and routines for representing hierarchies as
+tree objects.
+
+.. autosummary::
+   :toctree: generated/
+
+   ClusterNode
+   leaves_list
+   to_tree
+   cut_tree
+   optimal_leaf_ordering
+
+These are predicates for checking the validity of linkage and
+inconsistency matrices as well as for checking isomorphism of two
+flat cluster assignments.
+
+.. autosummary::
+   :toctree: generated/
+
+   is_valid_im
+   is_valid_linkage
+   is_isomorphic
+   is_monotonic
+   correspond
+   num_obs_linkage
+
+Utility routines for plotting:
+
+.. autosummary::
+   :toctree: generated/
+
+   set_link_color_palette
+
+Utility classes:
+
+.. autosummary::
+   :toctree: generated/
+
+   DisjointSet -- data structure for incremental connectivity queries
+
+"""
+# Copyright (C) Damian Eads, 2007-2008. New BSD License.
+ +# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com) +# +# Author: Damian Eads +# Date: September 22, 2007 +# +# Copyright (c) 2007, 2008, Damian Eads +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# - Redistributions of source code must retain the above +# copyright notice, this list of conditions and the +# following disclaimer. +# - Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# - Neither the name of the author nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import warnings +import bisect +from collections import deque + +import numpy as np +from . import _hierarchy, _optimal_leaf_ordering +import scipy.spatial.distance as distance +from scipy._lib._array_api import array_namespace, _asarray, copy +from scipy._lib._disjoint_set import DisjointSet + + +_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3, + 'median': 4, 'ward': 5, 'weighted': 6} +_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward') + +__all__ = ['ClusterNode', 'DisjointSet', 'average', 'centroid', 'complete', + 'cophenet', 'correspond', 'cut_tree', 'dendrogram', 'fcluster', + 'fclusterdata', 'from_mlab_linkage', 'inconsistent', + 'is_isomorphic', 'is_monotonic', 'is_valid_im', 'is_valid_linkage', + 'leaders', 'leaves_list', 'linkage', 'maxRstat', 'maxdists', + 'maxinconsts', 'median', 'num_obs_linkage', 'optimal_leaf_ordering', + 'set_link_color_palette', 'single', 'to_mlab_linkage', 'to_tree', + 'ward', 'weighted'] + + +class ClusterWarning(UserWarning): + pass + + +def _warning(s): + warnings.warn('scipy.cluster: %s' % s, ClusterWarning, stacklevel=3) + + +def int_floor(arr, xp): + # array_api_strict is strict about not allowing `int()` on a float array. + # That's typically not needed, here it is - so explicitly convert + return int(xp.astype(xp.asarray(arr), xp.int64)) + + +def single(y): + """ + Perform single/min/nearest linkage on the condensed distance matrix ``y``. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + The linkage matrix. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. 
+ scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import single, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = single(y) + >>> Z + array([[ 0., 1., 1., 2.], + [ 2., 12., 1., 3.], + [ 3., 4., 1., 2.], + [ 5., 14., 1., 3.], + [ 6., 7., 1., 2.], + [ 8., 16., 1., 3.], + [ 9., 10., 1., 2.], + [11., 18., 1., 3.], + [13., 15., 2., 6.], + [17., 20., 2., 9.], + [19., 21., 2., 12.]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 10, 11, 12, 4, 5, 6, 1, 2, 3], dtype=int32) + >>> fcluster(Z, 1, criterion='distance') + array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32) + >>> fcluster(Z, 2, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + """ + return linkage(y, method='single', metric='euclidean') + + +def complete(y): + """ + Perform complete/max/farthest point linkage on a condensed distance matrix. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + the `linkage` function documentation for more information + on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import complete, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = complete(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.41421356, 3. ], + [ 5. , 13. , 1.41421356, 3. ], + [ 8. , 14. , 1.41421356, 3. ], + [11. , 15. , 1.41421356, 3. ], + [16. , 17. , 4.12310563, 6. ], + [18. , 19. , 4.12310563, 6. ], + [20. , 21. , 5.65685425, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. 
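+
+    As a quick check (a small sketch, using the ``Z`` computed above), a
+    linkage over ``n`` observations always has ``n - 1`` rows; here
+    ``n = 12``:
+
+    >>> Z.shape
+    (11, 4)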
+ + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + >>> fcluster(Z, 1.5, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + >>> fcluster(Z, 4.5, criterion='distance') + array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 6, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + """ + return linkage(y, method='complete', metric='euclidean') + + +def average(y): + """ + Perform average/UPGMA linkage on a condensed distance matrix. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + `linkage` for more information on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import average, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = average(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.20710678, 3. ], + [ 5. , 13. , 1.20710678, 3. ], + [ 8. , 14. , 1.20710678, 3. ], + [11. , 15. , 1.20710678, 3. ], + [16. , 17. , 3.39675184, 6. ], + [18. , 19. , 3.39675184, 6. ], + [20. , 21. , 4.09206523, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + >>> fcluster(Z, 1.5, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + >>> fcluster(Z, 4, criterion='distance') + array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 6, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='average', metric='euclidean') + + +def weighted(y): + """ + Perform weighted/WPGMA linkage on the condensed distance matrix. + + See `linkage` for more information on the return + structure and algorithm. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + `linkage` for more information on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. 
+ scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import weighted, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = weighted(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 11. , 1. , 2. ], + [ 2. , 12. , 1.20710678, 3. ], + [ 8. , 13. , 1.20710678, 3. ], + [ 5. , 14. , 1.20710678, 3. ], + [10. , 15. , 1.20710678, 3. ], + [18. , 19. , 3.05595762, 6. ], + [16. , 17. , 3.32379407, 6. ], + [20. , 21. , 4.06357713, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 1, 2, 3, 10, 11, 12, 4, 6, 5], dtype=int32) + >>> fcluster(Z, 1.5, criterion='distance') + array([3, 3, 3, 1, 1, 1, 4, 4, 4, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 4, criterion='distance') + array([2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1], dtype=int32) + >>> fcluster(Z, 6, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='weighted', metric='euclidean') + + +def centroid(y): + """ + Perform centroid/UPGMC linkage. + + See `linkage` for more information on the input matrix, + return structure, and algorithm. + + The following are common calling conventions: + + 1. ``Z = centroid(y)`` + + Performs centroid/UPGMC linkage on the condensed distance + matrix ``y``. + + 2. ``Z = centroid(X)`` + + Performs centroid/UPGMC linkage on the observation matrix ``X`` + using Euclidean distance as the distance metric. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed + distance matrix is a flat array containing the upper + triangular of the distance matrix. This is the form that + ``pdist`` returns. Alternatively, a collection of + m observation vectors in n dimensions may be passed as + an m by n array. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + the `linkage` function documentation for more information + on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import centroid, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = centroid(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. 
], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3.33333333, 6. ], + [16. , 17. , 3.33333333, 6. ], + [20. , 21. , 3.33333333, 12. ]]) # may vary + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32) # may vary + >>> fcluster(Z, 1.1, criterion='distance') + array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32) # may vary + >>> fcluster(Z, 2, criterion='distance') + array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32) # may vary + >>> fcluster(Z, 4, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='centroid', metric='euclidean') + + +def median(y): + """ + Perform median/WPGMC linkage. + + See `linkage` for more information on the return structure + and algorithm. + + The following are common calling conventions: + + 1. ``Z = median(y)`` + + Performs median/WPGMC linkage on the condensed distance matrix + ``y``. See ``linkage`` for more information on the return + structure and algorithm. + + 2. ``Z = median(X)`` + + Performs median/WPGMC linkage on the observation matrix ``X`` + using Euclidean distance as the distance metric. See `linkage` + for more information on the return structure and algorithm. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed + distance matrix is a flat array containing the upper + triangular of the distance matrix. This is the form that + ``pdist`` returns. Alternatively, a collection of + m observation vectors in n dimensions may be passed as + an m by n array. + + Returns + ------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import median, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = median(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. ], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3. , 6. ], + [16. , 17. , 3.5 , 6. ], + [20. , 21. , 3.25 , 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. 
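+
+    Note that this linkage is not monotonic: the merge at height 3.5 is
+    followed by one at height 3.25. `is_monotonic` confirms this (a small
+    sketch, reusing the ``Z`` computed above):
+
+    >>> from scipy.cluster.hierarchy import is_monotonic
+    >>> is_monotonic(Z)
+    False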
+ + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32) + >>> fcluster(Z, 1.1, criterion='distance') + array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32) + >>> fcluster(Z, 2, criterion='distance') + array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 4, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='median', metric='euclidean') + + +def ward(y): + """ + Perform Ward's linkage on a condensed distance matrix. + + See `linkage` for more information on the return structure + and algorithm. + + The following are common calling conventions: + + 1. ``Z = ward(y)`` + Performs Ward's linkage on the condensed distance matrix ``y``. + + 2. ``Z = ward(X)`` + Performs Ward's linkage on the observation matrix ``X`` using + Euclidean distance as the distance metric. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed + distance matrix is a flat array containing the upper + triangular of the distance matrix. This is the form that + ``pdist`` returns. Alternatively, a collection of + m observation vectors in n dimensions may be passed as + an m by n array. + + Returns + ------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. See + `linkage` for more information on the return structure and + algorithm. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = ward(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + >>> fcluster(Z, 1.1, criterion='distance') + array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32) + >>> fcluster(Z, 3, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + >>> fcluster(Z, 9, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. 
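+
+    One common follow-up check (a sketch, reusing ``Z`` and ``y`` from
+    above) is the cophenetic correlation coefficient, which measures how
+    faithfully the dendrogram preserves the original pairwise distances;
+    the assignment produces no doctest output:
+
+    >>> from scipy.cluster.hierarchy import cophenet
+    >>> c, coph_dists = cophenet(Z, y)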
+ + """ + return linkage(y, method='ward', metric='euclidean') + + +def linkage(y, method='single', metric='euclidean', optimal_ordering=False): + """ + Perform hierarchical/agglomerative clustering. + + The input y may be either a 1-D condensed distance matrix + or a 2-D array of observation vectors. + + If y is a 1-D condensed distance matrix, + then y must be a :math:`\\binom{n}{2}` sized + vector, where n is the number of original observations paired + in the distance matrix. The behavior of this function is very + similar to the MATLAB linkage function. + + A :math:`(n-1)` by 4 matrix ``Z`` is returned. At the + :math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and + ``Z[i, 1]`` are combined to form cluster :math:`n + i`. A + cluster with an index less than :math:`n` corresponds to one of + the :math:`n` original observations. The distance between + clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The + fourth value ``Z[i, 3]`` represents the number of original + observations in the newly formed cluster. + + The following linkage methods are used to compute the distance + :math:`d(s, t)` between two clusters :math:`s` and + :math:`t`. The algorithm begins with a forest of clusters that + have yet to be used in the hierarchy being formed. When two + clusters :math:`s` and :math:`t` from this forest are combined + into a single cluster :math:`u`, :math:`s` and :math:`t` are + removed from the forest, and :math:`u` is added to the + forest. When only one cluster remains in the forest, the algorithm + stops, and this cluster becomes the root. + + A distance matrix is maintained at each iteration. The ``d[i,j]`` + entry corresponds to the distance between cluster :math:`i` and + :math:`j` in the original forest. + + At each iteration, the algorithm must update the distance matrix + to reflect the distance of the newly formed cluster u with the + remaining clusters in the forest. + + Suppose there are :math:`|u|` original observations + :math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and + :math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in + cluster :math:`v`. Recall, :math:`s` and :math:`t` are + combined to form cluster :math:`u`. Let :math:`v` be any + remaining cluster in the forest that is not :math:`u`. + + The following are methods for calculating the distance between the + newly formed cluster :math:`u` and each :math:`v`. + + * method='single' assigns + + .. math:: + d(u,v) = \\min(dist(u[i],v[j])) + + for all points :math:`i` in cluster :math:`u` and + :math:`j` in cluster :math:`v`. This is also known as the + Nearest Point Algorithm. + + * method='complete' assigns + + .. math:: + d(u, v) = \\max(dist(u[i],v[j])) + + for all points :math:`i` in cluster u and :math:`j` in + cluster :math:`v`. This is also known by the Farthest Point + Algorithm or Voor Hees Algorithm. + + * method='average' assigns + + .. math:: + d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])} + {(|u|*|v|)} + + for all points :math:`i` and :math:`j` where :math:`|u|` + and :math:`|v|` are the cardinalities of clusters :math:`u` + and :math:`v`, respectively. This is also called the UPGMA + algorithm. + + * method='weighted' assigns + + .. math:: + d(u,v) = (dist(s,v) + dist(t,v))/2 + + where cluster u was formed with cluster s and t and v + is a remaining cluster in the forest (also called WPGMA). + + * method='centroid' assigns + + .. math:: + dist(s,t) = ||c_s-c_t||_2 + + where :math:`c_s` and :math:`c_t` are the centroids of + clusters :math:`s` and :math:`t`, respectively. 
When two + clusters :math:`s` and :math:`t` are combined into a new + cluster :math:`u`, the new centroid is computed over all the + original objects in clusters :math:`s` and :math:`t`. The + distance then becomes the Euclidean distance between the + centroid of :math:`u` and the centroid of a remaining cluster + :math:`v` in the forest. This is also known as the UPGMC + algorithm. + + * method='median' assigns :math:`d(s,t)` like the ``centroid`` + method. When two clusters :math:`s` and :math:`t` are combined + into a new cluster :math:`u`, the average of centroids s and t + give the new centroid :math:`u`. This is also known as the + WPGMC algorithm. + + * method='ward' uses the Ward variance minimization algorithm. + The new entry :math:`d(u,v)` is computed as follows, + + .. math:: + + d(u,v) = \\sqrt{\\frac{|v|+|s|} + {T}d(v,s)^2 + + \\frac{|v|+|t|} + {T}d(v,t)^2 + - \\frac{|v|} + {T}d(s,t)^2} + + where :math:`u` is the newly joined cluster consisting of + clusters :math:`s` and :math:`t`, :math:`v` is an unused + cluster in the forest, :math:`T=|v|+|s|+|t|`, and + :math:`|*|` is the cardinality of its argument. This is also + known as the incremental algorithm. + + Warning: When the minimum distance pair in the forest is chosen, there + may be two or more pairs with the same minimum distance. This + implementation may choose a different minimum than the MATLAB + version. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed distance matrix + is a flat array containing the upper triangular of the distance matrix. + This is the form that ``pdist`` returns. Alternatively, a collection of + :math:`m` observation vectors in :math:`n` dimensions may be passed as + an :math:`m` by :math:`n` array. All elements of the condensed distance + matrix must be finite, i.e., no NaNs or infs. + method : str, optional + The linkage algorithm to use. See the ``Linkage Methods`` section below + for full descriptions. + metric : str or function, optional + The distance metric to use in the case that y is a collection of + observation vectors; ignored otherwise. See the ``pdist`` + function for a list of valid distance metrics. A custom distance + function can also be used. + optimal_ordering : bool, optional + If True, the linkage matrix will be reordered so that the distance + between successive leaves is minimal. This results in a more intuitive + tree structure when the data are visualized. defaults to False, because + this algorithm can be slow, particularly on large datasets [2]_. See + also the `optimal_leaf_ordering` function. + + .. versionadded:: 1.0.0 + + Returns + ------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. + + Notes + ----- + 1. For method 'single', an optimized algorithm based on minimum spanning + tree is implemented. It has time complexity :math:`O(n^2)`. + For methods 'complete', 'average', 'weighted' and 'ward', an algorithm + called nearest-neighbors chain is implemented. It also has time + complexity :math:`O(n^2)`. + For other methods, a naive algorithm is implemented with :math:`O(n^3)` + time complexity. + All algorithms use :math:`O(n^2)` memory. + Refer to [1]_ for details about the algorithms. + 2. Methods 'centroid', 'median', and 'ward' are correctly defined only if + Euclidean pairwise metric is used. If `y` is passed as precomputed + pairwise distances, then it is the user's responsibility to assure that + these distances are in fact Euclidean, otherwise the produced result + will be incorrect. 
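+    3. The two accepted forms of `y` are related in size as follows: a
+       condensed distance matrix over :math:`n` observations has
+       :math:`\\binom{n}{2}` entries, while the resulting linkage matrix
+       has :math:`n - 1` rows. A small sketch:
+
+       >>> import numpy as np
+       >>> from scipy.spatial.distance import pdist
+       >>> from scipy.cluster.hierarchy import linkage
+       >>> X = np.arange(8.0).reshape(-1, 1)  # 8 one-dimensional points
+       >>> pdist(X).shape
+       (28,)
+       >>> linkage(pdist(X)).shape
+       (7, 4)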
+ + See Also + -------- + scipy.spatial.distance.pdist : pairwise distance metrics + + References + ---------- + .. [1] Daniel Mullner, "Modern hierarchical, agglomerative clustering + algorithms", :arXiv:`1109.2378v1`. + .. [2] Ziv Bar-Joseph, David K. Gifford, Tommi S. Jaakkola, "Fast optimal + leaf ordering for hierarchical clustering", 2001. Bioinformatics + :doi:`10.1093/bioinformatics/17.suppl_1.S22` + + Examples + -------- + >>> from scipy.cluster.hierarchy import dendrogram, linkage + >>> from matplotlib import pyplot as plt + >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]] + + >>> Z = linkage(X, 'ward') + >>> fig = plt.figure(figsize=(25, 10)) + >>> dn = dendrogram(Z) + + >>> Z = linkage(X, 'single') + >>> fig = plt.figure(figsize=(25, 10)) + >>> dn = dendrogram(Z) + >>> plt.show() + """ + xp = array_namespace(y) + y = _asarray(y, order='C', dtype=xp.float64, xp=xp) + + if method not in _LINKAGE_METHODS: + raise ValueError(f"Invalid method: {method}") + + if method in _EUCLIDEAN_METHODS and metric != 'euclidean' and y.ndim == 2: + msg = f"`method={method}` requires the distance metric to be Euclidean" + raise ValueError(msg) + + if y.ndim == 1: + distance.is_valid_y(y, throw=True, name='y') + elif y.ndim == 2: + if (y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0) and + xp.all(y >= 0) and np.allclose(y, y.T)): + warnings.warn('The symmetric non-negative hollow observation ' + 'matrix looks suspiciously like an uncondensed ' + 'distance matrix', + ClusterWarning, stacklevel=2) + y = distance.pdist(y, metric) + y = xp.asarray(y) + else: + raise ValueError("`y` must be 1 or 2 dimensional.") + + if not xp.all(xp.isfinite(y)): + raise ValueError("The condensed distance matrix must contain only " + "finite values.") + + n = int(distance.num_obs_y(y)) + method_code = _LINKAGE_METHODS[method] + + y = np.asarray(y) + if method == 'single': + result = _hierarchy.mst_single_linkage(y, n) + elif method in ['complete', 'average', 'weighted', 'ward']: + result = _hierarchy.nn_chain(y, n, method_code) + else: + result = _hierarchy.fast_linkage(y, n, method_code) + result = xp.asarray(result) + + if optimal_ordering: + y = xp.asarray(y) + return optimal_leaf_ordering(result, y) + else: + return result + + +class ClusterNode: + """ + A tree node class for representing a cluster. + + Leaf nodes correspond to original observations, while non-leaf nodes + correspond to non-singleton clusters. + + The `to_tree` function converts a matrix returned by the linkage + function into an easy-to-use tree representation. + + All parameter names are also attributes. + + Parameters + ---------- + id : int + The node id. + left : ClusterNode instance, optional + The left child tree node. + right : ClusterNode instance, optional + The right child tree node. + dist : float, optional + Distance for this cluster in the linkage matrix. + count : int, optional + The number of samples in this cluster. + + See Also + -------- + to_tree : for converting a linkage matrix ``Z`` into a tree object. + + """ + + def __init__(self, id, left=None, right=None, dist=0, count=1): + if id < 0: + raise ValueError('The id must be non-negative.') + if dist < 0: + raise ValueError('The distance must be non-negative.') + if (left is None and right is not None) or \ + (left is not None and right is None): + raise ValueError('Only full or proper binary trees are permitted.' 
+ ' This node has one child.') + if count < 1: + raise ValueError('A cluster must contain at least one original ' + 'observation.') + self.id = id + self.left = left + self.right = right + self.dist = dist + if self.left is None: + self.count = count + else: + self.count = left.count + right.count + + def __lt__(self, node): + if not isinstance(node, ClusterNode): + raise ValueError("Can't compare ClusterNode " + f"to type {type(node)}") + return self.dist < node.dist + + def __gt__(self, node): + if not isinstance(node, ClusterNode): + raise ValueError("Can't compare ClusterNode " + f"to type {type(node)}") + return self.dist > node.dist + + def __eq__(self, node): + if not isinstance(node, ClusterNode): + raise ValueError("Can't compare ClusterNode " + f"to type {type(node)}") + return self.dist == node.dist + + def get_id(self): + """ + The identifier of the target node. + + For ``0 <= i < n``, `i` corresponds to original observation i. + For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed + at iteration ``i-n``. + + Returns + ------- + id : int + The identifier of the target node. + + """ + return self.id + + def get_count(self): + """ + The number of leaf nodes (original observations) belonging to + the cluster node nd. If the target node is a leaf, 1 is + returned. + + Returns + ------- + get_count : int + The number of leaf nodes below the target node. + + """ + return self.count + + def get_left(self): + """ + Return a reference to the left child tree object. + + Returns + ------- + left : ClusterNode + The left child of the target node. If the node is a leaf, + None is returned. + + """ + return self.left + + def get_right(self): + """ + Return a reference to the right child tree object. + + Returns + ------- + right : ClusterNode + The left child of the target node. If the node is a leaf, + None is returned. + + """ + return self.right + + def is_leaf(self): + """ + Return True if the target node is a leaf. + + Returns + ------- + leafness : bool + True if the target node is a leaf node. + + """ + return self.left is None + + def pre_order(self, func=(lambda x: x.id)): + """ + Perform pre-order traversal without recursive function calls. + + When a leaf node is first encountered, ``func`` is called with + the leaf node as its argument, and its result is appended to + the list. + + For example, the statement:: + + ids = root.pre_order(lambda x: x.id) + + returns a list of the node ids corresponding to the leaf nodes + of the tree as they appear from left to right. + + Parameters + ---------- + func : function + Applied to each leaf ClusterNode object in the pre-order traversal. + Given the ``i``-th leaf node in the pre-order traversal ``n[i]``, + the result of ``func(n[i])`` is stored in ``L[i]``. If not + provided, the index of the original observation to which the node + corresponds is used. + + Returns + ------- + L : list + The pre-order traversal. + + """ + # Do a preorder traversal, caching the result. To avoid having to do + # recursion, we'll store the previous index we've visited in a vector. 
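+        # curNode serves as an explicit stack (at most 2*n entries) and k is
+        # the stack pointer; lvisited and rvisited record which internal
+        # nodes have already had their left or right child pushed.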
+ n = self.count + + curNode = [None] * (2 * n) + lvisited = set() + rvisited = set() + curNode[0] = self + k = 0 + preorder = [] + while k >= 0: + nd = curNode[k] + ndid = nd.id + if nd.is_leaf(): + preorder.append(func(nd)) + k = k - 1 + else: + if ndid not in lvisited: + curNode[k + 1] = nd.left + lvisited.add(ndid) + k = k + 1 + elif ndid not in rvisited: + curNode[k + 1] = nd.right + rvisited.add(ndid) + k = k + 1 + # If we've visited the left and right of this non-leaf + # node already, go up in the tree. + else: + k = k - 1 + + return preorder + + +_cnode_bare = ClusterNode(0) +_cnode_type = type(ClusterNode) + + +def _order_cluster_tree(Z): + """ + Return clustering nodes in bottom-up order by distance. + + Parameters + ---------- + Z : scipy.cluster.linkage array + The linkage matrix. + + Returns + ------- + nodes : list + A list of ClusterNode objects. + """ + q = deque() + tree = to_tree(Z) + q.append(tree) + nodes = [] + + while q: + node = q.popleft() + if not node.is_leaf(): + bisect.insort_left(nodes, node) + q.append(node.get_right()) + q.append(node.get_left()) + return nodes + + +def cut_tree(Z, n_clusters=None, height=None): + """ + Given a linkage matrix Z, return the cut tree. + + Parameters + ---------- + Z : scipy.cluster.linkage array + The linkage matrix. + n_clusters : array_like, optional + Number of clusters in the tree at the cut point. + height : array_like, optional + The height at which to cut the tree. Only possible for ultrametric + trees. + + Returns + ------- + cutree : array + An array indicating group membership at each agglomeration step. I.e., + for a full cut tree, in the first column each data point is in its own + cluster. At the next step, two nodes are merged. Finally, all + singleton and non-singleton clusters are in one group. If `n_clusters` + or `height` are given, the columns correspond to the columns of + `n_clusters` or `height`. 
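+
+        With the default arguments, the full cut tree over ``n``
+        observations has shape ``(n, n)``, and column ``j`` holds the
+        memberships after ``j`` agglomeration steps.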
+ + Examples + -------- + >>> from scipy import cluster + >>> import numpy as np + >>> from numpy.random import default_rng + >>> rng = default_rng() + >>> X = rng.random((50, 4)) + >>> Z = cluster.hierarchy.ward(X) + >>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10]) + >>> cutree[:10] + array([[0, 0], + [1, 1], + [2, 2], + [3, 3], + [3, 4], + [2, 2], + [0, 0], + [1, 5], + [3, 6], + [4, 7]]) # random + + """ + xp = array_namespace(Z) + nobs = num_obs_linkage(Z) + nodes = _order_cluster_tree(Z) + + if height is not None and n_clusters is not None: + raise ValueError("At least one of either height or n_clusters " + "must be None") + elif height is None and n_clusters is None: # return the full cut tree + cols_idx = xp.arange(nobs) + elif height is not None: + height = xp.asarray(height) + heights = xp.asarray([x.dist for x in nodes]) + cols_idx = xp.searchsorted(heights, height) + else: + n_clusters = xp.asarray(n_clusters) + cols_idx = nobs - xp.searchsorted(xp.arange(nobs), n_clusters) + + try: + n_cols = len(cols_idx) + except TypeError: # scalar + n_cols = 1 + cols_idx = xp.asarray([cols_idx]) + + groups = xp.zeros((n_cols, nobs), dtype=xp.int64) + last_group = xp.arange(nobs) + if 0 in cols_idx: + groups[0] = last_group + + for i, node in enumerate(nodes): + idx = node.pre_order() + this_group = copy(last_group, xp=xp) + # TODO ARRAY_API complex indexing not supported + this_group[idx] = xp.min(last_group[idx]) + this_group[this_group > xp.max(last_group[idx])] -= 1 + if i + 1 in cols_idx: + groups[np.nonzero(i + 1 == cols_idx)[0]] = this_group + last_group = this_group + + return groups.T + + +def to_tree(Z, rd=False): + """ + Convert a linkage matrix into an easy-to-use tree object. + + The reference to the root `ClusterNode` object is returned (by default). + + Each `ClusterNode` object has a ``left``, ``right``, ``dist``, ``id``, + and ``count`` attribute. The left and right attributes point to + ClusterNode objects that were combined to generate the cluster. + If both are None then the `ClusterNode` object is a leaf node, its count + must be 1, and its distance is meaningless but set to 0. + + *Note: This function is provided for the convenience of the library + user. ClusterNodes are not used as input to any of the functions in this + library.* + + Parameters + ---------- + Z : ndarray + The linkage matrix in proper form (see the `linkage` + function documentation). + rd : bool, optional + When False (default), a reference to the root `ClusterNode` object is + returned. Otherwise, a tuple ``(r, d)`` is returned. ``r`` is a + reference to the root node while ``d`` is a list of `ClusterNode` + objects - one per original entry in the linkage matrix plus entries + for all clustering steps. If a cluster id is + less than the number of samples ``n`` in the data that the linkage + matrix describes, then it corresponds to a singleton cluster (leaf + node). + See `linkage` for more information on the assignment of cluster ids + to clusters. + + Returns + ------- + tree : ClusterNode or tuple (ClusterNode, list of ClusterNode) + If ``rd`` is False, a `ClusterNode`. + If ``rd`` is True, a list of length ``2*n - 1``, with ``n`` the number + of samples. See the description of `rd` above for more details. 
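+
+        With ``rd=True``, the first ``n`` entries of the list are the leaf
+        nodes, and entry ``n + i`` is the cluster formed at row ``i`` of
+        ``Z``.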
+
+    See Also
+    --------
+    linkage, is_valid_linkage, ClusterNode
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster import hierarchy
+    >>> rng = np.random.default_rng()
+    >>> x = rng.random((5, 2))
+    >>> Z = hierarchy.linkage(x)
+    >>> hierarchy.to_tree(Z)
+    <scipy.cluster.hierarchy.ClusterNode object at ...
+    >>> rootnode, nodelist = hierarchy.to_tree(Z, rd=True)
+    >>> rootnode
+    <scipy.cluster.hierarchy.ClusterNode object at ...
+    >>> len(nodelist)
+    9
+
+    """
+    xp = array_namespace(Z)
+    Z = _asarray(Z, order='c', xp=xp)
+    is_valid_linkage(Z, throw=True, name='Z')
+
+    # Number of original objects is equal to the number of rows plus 1.
+    n = Z.shape[0] + 1
+
+    # Create a list full of None's to store the node objects
+    d = [None] * (n * 2 - 1)
+
+    # Create the nodes corresponding to the n original objects.
+    for i in range(0, n):
+        d[i] = ClusterNode(i)
+
+    nd = None
+
+    for i in range(Z.shape[0]):
+        row = Z[i, :]
+
+        fi = int_floor(row[0], xp)
+        fj = int_floor(row[1], xp)
+        if fi > i + n:
+            raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
+                              'is used before it is formed. See row %d, '
+                              'column 0') % fi)
+        if fj > i + n:
+            raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
+                              'is used before it is formed. See row %d, '
+                              'column 1') % fj)
+
+        nd = ClusterNode(i + n, d[fi], d[fj], row[2])
+        #                 ^ id    ^ left ^ right ^ dist
+        if row[3] != nd.count:
+            raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
+                              'incorrect.') % i)
+        d[n + i] = nd
+
+    if rd:
+        return (nd, d)
+    else:
+        return nd
+
+
+def optimal_leaf_ordering(Z, y, metric='euclidean'):
+    """
+    Given a linkage matrix Z and distance, reorder the cut tree.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a linkage matrix. See
+        `linkage` for more information on the return structure and
+        algorithm.
+    y : ndarray
+        The condensed distance matrix from which Z was generated.
+        Alternatively, a collection of m observation vectors in n
+        dimensions may be passed as an m by n array.
+    metric : str or function, optional
+        The distance metric to use in the case that y is a collection of
+        observation vectors; ignored otherwise. See the ``pdist``
+        function for a list of valid distance metrics. A custom distance
+        function can also be used.
+
+    Returns
+    -------
+    Z_ordered : ndarray
+        A copy of the linkage matrix Z, reordered to minimize the distance
+        between adjacent leaves.
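+
+        Passing ``optimal_ordering=True`` to `linkage` is equivalent to
+        calling this function on the matrix that `linkage` returns.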
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster import hierarchy + >>> rng = np.random.default_rng() + >>> X = rng.standard_normal((10, 10)) + >>> Z = hierarchy.ward(X) + >>> hierarchy.leaves_list(Z) + array([0, 3, 1, 9, 2, 5, 7, 4, 6, 8], dtype=int32) + >>> hierarchy.leaves_list(hierarchy.optimal_leaf_ordering(Z, X)) + array([3, 0, 2, 5, 7, 4, 8, 6, 9, 1], dtype=int32) + + """ + xp = array_namespace(Z, y) + Z = _asarray(Z, order='C', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + y = _asarray(y, order='C', dtype=xp.float64, xp=xp) + + if y.ndim == 1: + distance.is_valid_y(y, throw=True, name='y') + elif y.ndim == 2: + if (y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0) and + np.all(y >= 0) and np.allclose(y, y.T)): + warnings.warn('The symmetric non-negative hollow observation ' + 'matrix looks suspiciously like an uncondensed ' + 'distance matrix', + ClusterWarning, stacklevel=2) + y = distance.pdist(y, metric) + y = xp.asarray(y) + else: + raise ValueError("`y` must be 1 or 2 dimensional.") + + if not xp.all(xp.isfinite(y)): + raise ValueError("The condensed distance matrix must contain only " + "finite values.") + + Z = np.asarray(Z) + y = np.asarray(y) + return xp.asarray(_optimal_leaf_ordering.optimal_leaf_ordering(Z, y)) + + +def cophenet(Z, Y=None): + """ + Calculate the cophenetic distances between each observation in + the hierarchical clustering defined by the linkage ``Z``. + + Suppose ``p`` and ``q`` are original observations in + disjoint clusters ``s`` and ``t``, respectively and + ``s`` and ``t`` are joined by a direct parent cluster + ``u``. The cophenetic distance between observations + ``i`` and ``j`` is simply the distance between + clusters ``s`` and ``t``. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded as an array + (see `linkage` function). + Y : ndarray (optional) + Calculates the cophenetic correlation coefficient ``c`` of a + hierarchical clustering defined by the linkage matrix `Z` + of a set of :math:`n` observations in :math:`m` + dimensions. `Y` is the condensed distance matrix from which + `Z` was generated. + + Returns + ------- + c : ndarray + The cophentic correlation distance (if ``Y`` is passed). + d : ndarray + The cophenetic distance matrix in condensed form. The + :math:`ij` th entry is the cophenetic distance between + original observations :math:`i` and :math:`j`. + + See Also + -------- + linkage : + for a description of what a linkage matrix is. + scipy.spatial.distance.squareform : + transforming condensed matrices into square ones. + + Examples + -------- + >>> from scipy.cluster.hierarchy import single, cophenet + >>> from scipy.spatial.distance import pdist, squareform + + Given a dataset ``X`` and a linkage matrix ``Z``, the cophenetic distance + between two points of ``X`` is the distance between the largest two + distinct clusters that each of the points: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... 
[4, 4], [3, 4], [4, 3]] + + ``X`` corresponds to this dataset :: + + x x x x + x x + + x x + x x x x + + >>> Z = single(pdist(X)) + >>> Z + array([[ 0., 1., 1., 2.], + [ 2., 12., 1., 3.], + [ 3., 4., 1., 2.], + [ 5., 14., 1., 3.], + [ 6., 7., 1., 2.], + [ 8., 16., 1., 3.], + [ 9., 10., 1., 2.], + [11., 18., 1., 3.], + [13., 15., 2., 6.], + [17., 20., 2., 9.], + [19., 21., 2., 12.]]) + >>> cophenet(Z) + array([1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 2., 2., + 2., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 1., 1., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 1., 1., 1.]) + + The output of the `scipy.cluster.hierarchy.cophenet` method is + represented in condensed form. We can use + `scipy.spatial.distance.squareform` to see the output as a + regular matrix (where each element ``ij`` denotes the cophenetic distance + between each ``i``, ``j`` pair of points in ``X``): + + >>> squareform(cophenet(Z)) + array([[0., 1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.], + [1., 0., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.], + [1., 1., 0., 2., 2., 2., 2., 2., 2., 2., 2., 2.], + [2., 2., 2., 0., 1., 1., 2., 2., 2., 2., 2., 2.], + [2., 2., 2., 1., 0., 1., 2., 2., 2., 2., 2., 2.], + [2., 2., 2., 1., 1., 0., 2., 2., 2., 2., 2., 2.], + [2., 2., 2., 2., 2., 2., 0., 1., 1., 2., 2., 2.], + [2., 2., 2., 2., 2., 2., 1., 0., 1., 2., 2., 2.], + [2., 2., 2., 2., 2., 2., 1., 1., 0., 2., 2., 2.], + [2., 2., 2., 2., 2., 2., 2., 2., 2., 0., 1., 1.], + [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 0., 1.], + [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 0.]]) + + In this example, the cophenetic distance between points on ``X`` that are + very close (i.e., in the same corner) is 1. For other pairs of points is 2, + because the points will be located in clusters at different + corners - thus, the distance between these clusters will be larger. + + """ + xp = array_namespace(Z, Y) + # Ensure float64 C-contiguous array. Cython code doesn't deal with striding. + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + n = Z.shape[0] + 1 + zz = np.zeros((n * (n-1)) // 2, dtype=np.float64) + + Z = np.asarray(Z) + _hierarchy.cophenetic_distances(Z, zz, int(n)) + zz = xp.asarray(zz) + if Y is None: + return zz + + Y = _asarray(Y, order='C', xp=xp) + distance.is_valid_y(Y, throw=True, name='Y') + + z = xp.mean(zz) + y = xp.mean(Y) + Yy = Y - y + Zz = zz - z + numerator = (Yy * Zz) + denomA = Yy**2 + denomB = Zz**2 + c = xp.sum(numerator) / xp.sqrt(xp.sum(denomA) * xp.sum(denomB)) + return (c, zz) + + +def inconsistent(Z, d=2): + r""" + Calculate inconsistency statistics on a linkage matrix. + + Parameters + ---------- + Z : ndarray + The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical + clustering). See `linkage` documentation for more information on its + form. + d : int, optional + The number of links up to `d` levels below each non-singleton cluster. + + Returns + ------- + R : ndarray + A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link + statistics for the non-singleton cluster ``i``. The link statistics are + computed over the link heights for links :math:`d` levels below the + cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard + deviation of the link heights, respectively; ``R[i,2]`` is the number + of links included in the calculation; and ``R[i,3]`` is the + inconsistency coefficient, + + .. 
math:: \frac{\mathtt{Z[i,2]} - \mathtt{R[i,0]}} {R[i,1]} + + Notes + ----- + This function behaves similarly to the MATLAB(TM) ``inconsistent`` + function. + + Examples + -------- + >>> from scipy.cluster.hierarchy import inconsistent, linkage + >>> from matplotlib import pyplot as plt + >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]] + >>> Z = linkage(X, 'ward') + >>> print(Z) + [[ 5. 6. 0. 2. ] + [ 2. 7. 0. 2. ] + [ 0. 4. 1. 2. ] + [ 1. 8. 1.15470054 3. ] + [ 9. 10. 2.12132034 4. ] + [ 3. 12. 4.11096096 5. ] + [11. 13. 14.07183949 8. ]] + >>> inconsistent(Z) + array([[ 0. , 0. , 1. , 0. ], + [ 0. , 0. , 1. , 0. ], + [ 1. , 0. , 1. , 0. ], + [ 0.57735027, 0.81649658, 2. , 0.70710678], + [ 1.04044011, 1.06123822, 3. , 1.01850858], + [ 3.11614065, 1.40688837, 2. , 0.70710678], + [ 6.44583366, 6.76770586, 3. , 1.12682288]]) + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + if (not d == np.floor(d)) or d < 0: + raise ValueError('The second argument d must be a nonnegative ' + 'integer value.') + + n = Z.shape[0] + 1 + R = np.zeros((n - 1, 4), dtype=np.float64) + + Z = np.asarray(Z) + _hierarchy.inconsistent(Z, R, int(n), int(d)) + R = xp.asarray(R) + return R + + +def from_mlab_linkage(Z): + """ + Convert a linkage matrix generated by MATLAB(TM) to a new + linkage matrix compatible with this module. + + The conversion does two things: + + * the indices are converted from ``1..N`` to ``0..(N-1)`` form, + and + + * a fourth column ``Z[:,3]`` is added where ``Z[i,3]`` represents the + number of original observations (leaves) in the non-singleton + cluster ``i``. + + This function is useful when loading in linkages from legacy data + files generated by MATLAB. + + Parameters + ---------- + Z : ndarray + A linkage matrix generated by MATLAB(TM). + + Returns + ------- + ZS : ndarray + A linkage matrix compatible with ``scipy.cluster.hierarchy``. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + to_mlab_linkage : transform from SciPy to MATLAB format. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster.hierarchy import ward, from_mlab_linkage + + Given a linkage matrix in MATLAB format ``mZ``, we can use + `scipy.cluster.hierarchy.from_mlab_linkage` to import + it into SciPy format: + + >>> mZ = np.array([[1, 2, 1], [4, 5, 1], [7, 8, 1], + ... [10, 11, 1], [3, 13, 1.29099445], + ... [6, 14, 1.29099445], + ... [9, 15, 1.29099445], + ... [12, 16, 1.29099445], + ... [17, 18, 5.77350269], + ... [19, 20, 5.77350269], + ... [21, 22, 8.16496581]]) + + >>> Z = from_mlab_linkage(mZ) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [ 11. , 15. , 1.29099445, 3. ], + [ 16. , 17. , 5.77350269, 6. ], + [ 18. , 19. , 5.77350269, 6. ], + [ 20. , 21. , 8.16496581, 12. ]]) + + As expected, the linkage matrix ``Z`` returned includes an + additional column counting the number of original samples in + each cluster. Also, all cluster indices are reduced by 1 + (MATLAB format uses 1-indexing, whereas SciPy uses 0-indexing). + + """ + xp = array_namespace(Z) + Z = _asarray(Z, dtype=xp.float64, order='C', xp=xp) + Zs = Z.shape + + # If it's empty, return it. 
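+    # (A zero-size or zero-row MATLAB linkage has nothing to convert, so a
+    # copy is returned unchanged.)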
+ if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0): + return copy(Z, xp=xp) + + if len(Zs) != 2: + raise ValueError("The linkage array must be rectangular.") + + # If it contains no rows, return it. + if Zs[0] == 0: + return copy(Z, xp=xp) + + Zpart = copy(Z, xp=xp) + if xp.min(Zpart[:, 0:2]) != 1.0 and xp.max(Zpart[:, 0:2]) != 2 * Zs[0]: + raise ValueError('The format of the indices is not 1..N') + + Zpart[:, 0:2] -= 1.0 + CS = np.zeros((Zs[0],), dtype=np.float64) + Zpart = np.asarray(Zpart) + _hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1) + res = np.hstack([Zpart, CS.reshape(Zs[0], 1)]) + return xp.asarray(res) + + +def to_mlab_linkage(Z): + """ + Convert a linkage matrix to a MATLAB(TM) compatible one. + + Converts a linkage matrix ``Z`` generated by the linkage function + of this module to a MATLAB(TM) compatible one. The return linkage + matrix has the last column removed and the cluster indices are + converted to ``1..N`` indexing. + + Parameters + ---------- + Z : ndarray + A linkage matrix generated by ``scipy.cluster.hierarchy``. + + Returns + ------- + to_mlab_linkage : ndarray + A linkage matrix compatible with MATLAB(TM)'s hierarchical + clustering functions. + + The return linkage matrix has the last column removed + and the cluster indices are converted to ``1..N`` indexing. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + from_mlab_linkage : transform from Matlab to SciPy format. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, to_mlab_linkage + >>> from scipy.spatial.distance import pdist + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + + After a linkage matrix ``Z`` has been created, we can use + `scipy.cluster.hierarchy.to_mlab_linkage` to convert it + into MATLAB format: + + >>> mZ = to_mlab_linkage(Z) + >>> mZ + array([[ 1. , 2. , 1. ], + [ 4. , 5. , 1. ], + [ 7. , 8. , 1. ], + [ 10. , 11. , 1. ], + [ 3. , 13. , 1.29099445], + [ 6. , 14. , 1.29099445], + [ 9. , 15. , 1.29099445], + [ 12. , 16. , 1.29099445], + [ 17. , 18. , 5.77350269], + [ 19. , 20. , 5.77350269], + [ 21. , 22. , 8.16496581]]) + + The new linkage matrix ``mZ`` uses 1-indexing for all the + clusters (instead of 0-indexing). Also, the last column of + the original linkage matrix has been dropped. + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + Zs = Z.shape + if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0): + return copy(Z, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + ZP = copy(Z[:, 0:3], xp=xp) + ZP[:, 0:2] += 1.0 + + return ZP + + +def is_monotonic(Z): + """ + Return True if the linkage passed is monotonic. + + The linkage is monotonic if for every cluster :math:`s` and :math:`t` + joined, the distance between them is no less than the distance + between any previously joined clusters. + + Parameters + ---------- + Z : ndarray + The linkage matrix to check for monotonicity. + + Returns + ------- + b : bool + A boolean indicating whether the linkage is monotonic. 
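+        Equivalently, True when the merge heights in ``Z[:, 2]`` are
+        non-decreasing from the first row to the last.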
+ + See Also + -------- + linkage : for a description of what a linkage matrix is. + + Examples + -------- + >>> from scipy.cluster.hierarchy import median, ward, is_monotonic + >>> from scipy.spatial.distance import pdist + + By definition, some hierarchical clustering algorithms - such as + `scipy.cluster.hierarchy.ward` - produce monotonic assignments of + samples to clusters; however, this is not always true for other + hierarchical methods - e.g. `scipy.cluster.hierarchy.median`. + + Given a linkage matrix ``Z`` (as the result of a hierarchical clustering + method) we can test programmatically whether it has the monotonicity + property or not, using `scipy.cluster.hierarchy.is_monotonic`: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + >>> is_monotonic(Z) + True + + >>> Z = median(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. ], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3. , 6. ], + [16. , 17. , 3.5 , 6. ], + [20. , 21. , 3.25 , 12. ]]) + >>> is_monotonic(Z) + False + + Note that this method is equivalent to just verifying that the distances + in the third column of the linkage matrix appear in a monotonically + increasing order. + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='c', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + # We expect the i'th value to be greater than its successor. + return xp.all(Z[1:, 2] >= Z[:-1, 2]) + + +def is_valid_im(R, warning=False, throw=False, name=None): + """Return True if the inconsistency matrix passed is valid. + + It must be a :math:`n` by 4 array of doubles. The standard + deviations ``R[:,1]`` must be nonnegative. The link counts + ``R[:,2]`` must be positive and no greater than :math:`n-1`. + + Parameters + ---------- + R : ndarray + The inconsistency matrix to check for validity. + warning : bool, optional + When True, issues a Python warning if the linkage + matrix passed is invalid. + throw : bool, optional + When True, throws a Python exception if the linkage + matrix passed is invalid. + name : str, optional + This string refers to the variable name of the invalid + linkage matrix. + + Returns + ------- + b : bool + True if the inconsistency matrix is valid. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + inconsistent : for the creation of a inconsistency matrix. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, inconsistent, is_valid_im + >>> from scipy.spatial.distance import pdist + + Given a data set ``X``, we can apply a clustering method to obtain a + linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can + be also used to obtain the inconsistency matrix ``R`` associated to + this clustering process: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> R = inconsistent(Z) + >>> Z + array([[ 0. , 1. , 1. , 2. 
], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + >>> R + array([[1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1.14549722, 0.20576415, 2. , 0.70710678], + [1.14549722, 0.20576415, 2. , 0.70710678], + [1.14549722, 0.20576415, 2. , 0.70710678], + [1.14549722, 0.20576415, 2. , 0.70710678], + [2.78516386, 2.58797734, 3. , 1.15470054], + [2.78516386, 2.58797734, 3. , 1.15470054], + [6.57065706, 1.38071187, 3. , 1.15470054]]) + + Now we can use `scipy.cluster.hierarchy.is_valid_im` to verify that + ``R`` is correct: + + >>> is_valid_im(R) + True + + However, if ``R`` is wrongly constructed (e.g., one of the standard + deviations is set to a negative value), then the check will fail: + + >>> R[-1,1] = R[-1,1] * -1 + >>> is_valid_im(R) + False + + """ + xp = array_namespace(R) + R = _asarray(R, order='c', xp=xp) + valid = True + name_str = "%r " % name if name else '' + try: + if R.dtype != xp.float64: + raise TypeError('Inconsistency matrix %smust contain doubles ' + '(double).' % name_str) + if len(R.shape) != 2: + raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. ' + 'be two-dimensional).' % name_str) + if R.shape[1] != 4: + raise ValueError('Inconsistency matrix %smust have 4 columns.' % + name_str) + if R.shape[0] < 1: + raise ValueError('Inconsistency matrix %smust have at least one ' + 'row.' % name_str) + if xp.any(R[:, 0] < 0): + raise ValueError('Inconsistency matrix %scontains negative link ' + 'height means.' % name_str) + if xp.any(R[:, 1] < 0): + raise ValueError('Inconsistency matrix %scontains negative link ' + 'height standard deviations.' % name_str) + if xp.any(R[:, 2] < 0): + raise ValueError('Inconsistency matrix %scontains negative link ' + 'counts.' % name_str) + except Exception as e: + if throw: + raise + if warning: + _warning(str(e)) + valid = False + + return valid + + +def is_valid_linkage(Z, warning=False, throw=False, name=None): + """ + Check the validity of a linkage matrix. + + A linkage matrix is valid if it is a 2-D array (type double) + with :math:`n` rows and 4 columns. The first two columns must contain + indices between 0 and :math:`2n-1`. For a given row ``i``, the following + two expressions have to hold: + + .. math:: + + 0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1 + 0 \\leq Z[i,1] \\leq i+n-1 + + I.e., a cluster cannot join another cluster unless the cluster being joined + has been generated. + + Parameters + ---------- + Z : array_like + Linkage matrix. + warning : bool, optional + When True, issues a Python warning if the linkage + matrix passed is invalid. + throw : bool, optional + When True, throws a Python exception if the linkage + matrix passed is invalid. + name : str, optional + This string refers to the variable name of the invalid + linkage matrix. + + Returns + ------- + b : bool + True if the inconsistency matrix is valid. + + See Also + -------- + linkage: for a description of what a linkage matrix is. 
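+
+    Notes
+    -----
+    As a quick worked check of the bounds above: for a linkage over
+    ``n = 12`` observations, row ``i = 3`` may reference cluster ids no
+    larger than ``i + n - 1 = 14``. The example below sets ``Z[3][1] = 20``
+    to violate exactly this bound, which is why the check fails there.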
+ + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, is_valid_linkage + >>> from scipy.spatial.distance import pdist + + All linkage matrices generated by the clustering methods in this module + will be valid (i.e., they will have the appropriate dimensions and the two + required expressions will hold for all the rows). + + We can check this using `scipy.cluster.hierarchy.is_valid_linkage`: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + >>> is_valid_linkage(Z) + True + + However, if we create a linkage matrix in a wrong way - or if we modify + a valid one in a way that any of the required expressions don't hold + anymore, then the check will fail: + + >>> Z[3][1] = 20 # the cluster number 20 is not defined at this point + >>> is_valid_linkage(Z) + False + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='c', xp=xp) + valid = True + name_str = "%r " % name if name else '' + try: + if Z.dtype != xp.float64: + raise TypeError('Linkage matrix %smust contain doubles.' % name_str) + if len(Z.shape) != 2: + raise ValueError('Linkage matrix %smust have shape=2 (i.e. be ' + 'two-dimensional).' % name_str) + if Z.shape[1] != 4: + raise ValueError('Linkage matrix %smust have 4 columns.' % name_str) + if Z.shape[0] == 0: + raise ValueError('Linkage must be computed on at least two ' + 'observations.') + n = Z.shape[0] + if n > 1: + if (xp.any(Z[:, 0] < 0) or xp.any(Z[:, 1] < 0)): + raise ValueError('Linkage %scontains negative indices.' % + name_str) + if xp.any(Z[:, 2] < 0): + raise ValueError('Linkage %scontains negative distances.' % + name_str) + if xp.any(Z[:, 3] < 0): + raise ValueError('Linkage %scontains negative counts.' % + name_str) + if _check_hierarchy_uses_cluster_before_formed(Z): + raise ValueError('Linkage %suses non-singleton cluster before ' + 'it is formed.' % name_str) + if _check_hierarchy_uses_cluster_more_than_once(Z): + raise ValueError('Linkage %suses the same cluster more than once.' + % name_str) + except Exception as e: + if throw: + raise + if warning: + _warning(str(e)) + valid = False + + return valid + + +def _check_hierarchy_uses_cluster_before_formed(Z): + n = Z.shape[0] + 1 + for i in range(0, n - 1): + if Z[i, 0] >= n + i or Z[i, 1] >= n + i: + return True + return False + + +def _check_hierarchy_uses_cluster_more_than_once(Z): + n = Z.shape[0] + 1 + chosen = set() + for i in range(0, n - 1): + used_more_than_once = ( + (float(Z[i, 0]) in chosen) + or (float(Z[i, 1]) in chosen) + or Z[i, 0] == Z[i, 1] + ) + if used_more_than_once: + return True + chosen.add(float(Z[i, 0])) + chosen.add(float(Z[i, 1])) + return False + + +def _check_hierarchy_not_all_clusters_used(Z): + n = Z.shape[0] + 1 + chosen = set() + for i in range(0, n - 1): + chosen.add(int(Z[i, 0])) + chosen.add(int(Z[i, 1])) + must_chosen = set(range(0, 2 * n - 2)) + return len(must_chosen.difference(chosen)) > 0 + + +def num_obs_linkage(Z): + """ + Return the number of original observations of the linkage matrix passed. + + Parameters + ---------- + Z : ndarray + The linkage matrix on which to perform the operation. 
+ + Returns + ------- + n : int + The number of original observations in the linkage. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, num_obs_linkage + >>> from scipy.spatial.distance import pdist + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + + ``Z`` is a linkage matrix obtained after using the Ward clustering method + with ``X``, a dataset with 12 data points. + + >>> num_obs_linkage(Z) + 12 + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='c', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + return (Z.shape[0] + 1) + + +def correspond(Z, Y): + """ + Check for correspondence between linkage and condensed distance matrices. + + They must have the same number of original observations for + the check to succeed. + + This function is useful as a sanity check in algorithms that make + extensive use of linkage and distance matrices that must + correspond to the same set of original observations. + + Parameters + ---------- + Z : array_like + The linkage matrix to check for correspondence. + Y : array_like + The condensed distance matrix to check for correspondence. + + Returns + ------- + b : bool + A boolean indicating whether the linkage matrix and distance + matrix could possibly correspond to one another. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, correspond + >>> from scipy.spatial.distance import pdist + + This method can be used to check if a given linkage matrix ``Z`` has been + obtained from the application of a cluster method over a dataset ``X``: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + >>> X_condensed = pdist(X) + >>> Z = ward(X_condensed) + + Here, we can compare ``Z`` and ``X`` (in condensed form): + + >>> correspond(Z, X_condensed) + True + + """ + is_valid_linkage(Z, throw=True) + distance.is_valid_y(Y, throw=True) + xp = array_namespace(Z, Y) + Z = _asarray(Z, order='c', xp=xp) + Y = _asarray(Y, order='c', xp=xp) + return distance.num_obs_y(Y) == num_obs_linkage(Z) + + +def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None): + """ + Form flat clusters from the hierarchical clustering defined by + the given linkage matrix. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded with the matrix returned + by the `linkage` function. + t : scalar + For criteria 'inconsistent', 'distance' or 'monocrit', + this is the threshold to apply when forming flat clusters. + For 'maxclust' or 'maxclust_monocrit' criteria, + this would be max number of clusters requested. + criterion : str, optional + The criterion to use in forming flat clusters. This can + be any of the following values: + + ``inconsistent`` : + If a cluster node and all its + descendants have an inconsistent value less than or equal + to `t`, then all its leaf descendants belong to the + same flat cluster. When no non-singleton cluster meets + this criterion, every node is assigned to its own + cluster. (Default) + + ``distance`` : + Forms flat clusters so that the original + observations in each flat cluster have no greater a + cophenetic distance than `t`. 
+ + ``maxclust`` : + Finds a minimum threshold ``r`` so that + the cophenetic distance between any two original + observations in the same flat cluster is no more than + ``r`` and no more than `t` flat clusters are formed. + + ``monocrit`` : + Forms a flat cluster from a cluster node c + with index i when ``monocrit[j] <= t``. + + For example, to threshold on the maximum mean distance + as computed in the inconsistency matrix R with a + threshold of 0.8 do:: + + MR = maxRstat(Z, R, 3) + fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR) + + ``maxclust_monocrit`` : + Forms a flat cluster from a + non-singleton cluster node ``c`` when ``monocrit[i] <= + r`` for all cluster indices ``i`` below and including + ``c``. ``r`` is minimized such that no more than ``t`` + flat clusters are formed. monocrit must be + monotonic. For example, to minimize the threshold t on + maximum inconsistency values so that no more than 3 flat + clusters are formed, do:: + + MI = maxinconsts(Z, R) + fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI) + depth : int, optional + The maximum depth to perform the inconsistency calculation. + It has no meaning for the other criteria. Default is 2. + R : ndarray, optional + The inconsistency matrix to use for the ``'inconsistent'`` + criterion. This matrix is computed if not provided. + monocrit : ndarray, optional + An array of length n-1. `monocrit[i]` is the + statistics upon which non-singleton i is thresholded. The + monocrit vector must be monotonic, i.e., given a node c with + index i, for all node indices j corresponding to nodes + below c, ``monocrit[i] >= monocrit[j]``. + + Returns + ------- + fcluster : ndarray + An array of length ``n``. ``T[i]`` is the flat cluster number to + which original observation ``i`` belongs. + + See Also + -------- + linkage : for information about hierarchical clustering methods work. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, fcluster + >>> from scipy.spatial.distance import pdist + + All cluster linkage methods - e.g., `scipy.cluster.hierarchy.ward` + generate a linkage matrix ``Z`` as their output: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + + This matrix represents a dendrogram, where the first and second elements + are the two clusters merged at each step, the third element is the + distance between these clusters, and the fourth element is the size of + the new cluster - the number of original data points included. + + `scipy.cluster.hierarchy.fcluster` can be used to flatten the + dendrogram, obtaining as a result an assignation of the original data + points to single clusters. 
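+
+    Besides the distance-based flattening walked through below, a target
+    number of flat clusters can be requested directly with
+    ``criterion='maxclust'`` (an illustrative aside; the expected output
+    matches the four-cluster partition obtained with ``t=3`` further down):
+
+    >>> fcluster(Z, t=4, criterion='maxclust')
+    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)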
+ + This assignation mostly depends on a distance threshold ``t`` - the maximum + inter-cluster distance allowed: + + >>> fcluster(Z, t=0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + + >>> fcluster(Z, t=1.1, criterion='distance') + array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32) + + >>> fcluster(Z, t=3, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + + >>> fcluster(Z, t=9, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + In the first case, the threshold ``t`` is too small to allow any two + samples in the data to form a cluster, so 12 different clusters are + returned. + + In the second case, the threshold is large enough to allow the first + 4 points to be merged with their nearest neighbors. So, here, only 8 + clusters are returned. + + The third case, with a much higher threshold, allows for up to 8 data + points to be connected - so 4 clusters are returned here. + + Lastly, the threshold of the fourth case is large enough to allow for + all data points to be merged together - so a single cluster is returned. + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + n = Z.shape[0] + 1 + T = np.zeros((n,), dtype='i') + + if monocrit is not None: + monocrit = np.asarray(monocrit, order='C', dtype=np.float64) + + Z = np.asarray(Z) + monocrit = np.asarray(monocrit) + if criterion == 'inconsistent': + if R is None: + R = inconsistent(Z, depth) + else: + R = _asarray(R, order='C', dtype=xp.float64, xp=xp) + is_valid_im(R, throw=True, name='R') + # Since the C code does not support striding using strides. + # The dimensions are used instead. + R = np.asarray(R) + _hierarchy.cluster_in(Z, R, T, float(t), int(n)) + elif criterion == 'distance': + _hierarchy.cluster_dist(Z, T, float(t), int(n)) + elif criterion == 'maxclust': + _hierarchy.cluster_maxclust_dist(Z, T, int(n), t) + elif criterion == 'monocrit': + _hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n)) + elif criterion == 'maxclust_monocrit': + _hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t)) + else: + raise ValueError('Invalid cluster formation criterion: %s' + % str(criterion)) + return xp.asarray(T) + + +def fclusterdata(X, t, criterion='inconsistent', + metric='euclidean', depth=2, method='single', R=None): + """ + Cluster observation data using a given metric. + + Clusters the original observations in the n-by-m data + matrix X (n observations in m dimensions), using the euclidean + distance metric to calculate distances between original observations, + performs hierarchical clustering using the single linkage algorithm, + and forms flat clusters using the inconsistency method with `t` as the + cut-off threshold. + + A 1-D array ``T`` of length ``n`` is returned. ``T[i]`` is + the index of the flat cluster to which the original observation ``i`` + belongs. + + Parameters + ---------- + X : (N, M) ndarray + N by M data matrix with N observations in M dimensions. + t : scalar + For criteria 'inconsistent', 'distance' or 'monocrit', + this is the threshold to apply when forming flat clusters. + For 'maxclust' or 'maxclust_monocrit' criteria, + this would be max number of clusters requested. + criterion : str, optional + Specifies the criterion for forming flat clusters. Valid + values are 'inconsistent' (default), 'distance', or 'maxclust' + cluster formation algorithms. See `fcluster` for descriptions. 
+    metric : str or function, optional
+        The distance metric for calculating pairwise distances. See
+        ``distance.pdist`` for descriptions and ``linkage`` to verify
+        compatibility with the linkage method.
+    depth : int, optional
+        The maximum depth for the inconsistency calculation. See
+        `inconsistent` for more information.
+    method : str, optional
+        The linkage method to use (single, complete, average,
+        weighted, median, centroid, ward). See `linkage` for more
+        information. Default is "single".
+    R : ndarray, optional
+        The inconsistency matrix. It will be computed if necessary
+        if it is not passed.
+
+    Returns
+    -------
+    fclusterdata : ndarray
+        A vector of length n. T[i] is the flat cluster number to
+        which original observation i belongs.
+
+    See Also
+    --------
+    scipy.spatial.distance.pdist : pairwise distance metrics
+
+    Notes
+    -----
+    This function is similar to the MATLAB function ``clusterdata``.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import fclusterdata
+
+    This is a convenience method that abstracts all the steps to perform in a
+    typical SciPy hierarchical clustering workflow.
+
+    * Transform the input data into a condensed matrix with
+      `scipy.spatial.distance.pdist`.
+
+    * Apply a clustering method.
+
+    * Obtain flat clusters at a user defined distance threshold ``t`` using
+      `scipy.cluster.hierarchy.fcluster`.
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> fclusterdata(X, t=1)
+    array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
+
+    The output here (for the dataset ``X``, distance threshold ``t``, and the
+    default settings) is four clusters with three data points each.
+
+    """
+    xp = array_namespace(X)
+    X = _asarray(X, order='C', dtype=xp.float64, xp=xp)
+
+    if X.ndim != 2:
+        raise TypeError('The observation matrix X must be an n by m '
+                        'array.')
+
+    Y = distance.pdist(X, metric=metric)
+    Y = xp.asarray(Y)
+    Z = linkage(Y, method=method)
+    if R is None:
+        R = inconsistent(Z, d=depth)
+    else:
+        R = _asarray(R, order='c', xp=xp)
+    T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
+    return T
+
+
+def leaves_list(Z):
+    """
+    Return a list of leaf node ids.
+
+    The return corresponds to the observation vector index as it appears
+    in the tree from left to right. Z is a linkage matrix.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. `Z` is
+        a linkage matrix. See `linkage` for more information.
+
+    Returns
+    -------
+    leaves_list : ndarray
+        The list of leaf node ids.
+
+    See Also
+    --------
+    dendrogram : for information about dendrogram structure.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, dendrogram, leaves_list
+    >>> from scipy.spatial.distance import pdist
+    >>> from matplotlib import pyplot as plt
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+
+    The linkage matrix ``Z`` represents a dendrogram, that is, a tree that
+    encodes the structure of the clustering performed.
+ `scipy.cluster.hierarchy.leaves_list` shows the mapping between + indices in the ``X`` dataset and leaves in the dendrogram: + + >>> leaves_list(Z) + array([ 2, 0, 1, 5, 3, 4, 8, 6, 7, 11, 9, 10], dtype=int32) + + >>> fig = plt.figure(figsize=(25, 10)) + >>> dn = dendrogram(Z) + >>> plt.show() + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='C', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + n = Z.shape[0] + 1 + ML = np.zeros((n,), dtype='i') + Z = np.asarray(Z) + _hierarchy.prelist(Z, ML, n) + return xp.asarray(ML) + + +# Maps number of leaves to text size. +# +# p <= 20, size="12" +# 20 < p <= 30, size="10" +# 30 < p <= 50, size="8" +# 50 < p <= np.inf, size="6" + +_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5} +_drotation = {20: 0, 40: 45, np.inf: 90} +_dtextsortedkeys = list(_dtextsizes.keys()) +_dtextsortedkeys.sort() +_drotationsortedkeys = list(_drotation.keys()) +_drotationsortedkeys.sort() + + +def _remove_dups(L): + """ + Remove duplicates AND preserve the original order of the elements. + + The set class is not guaranteed to do this. + """ + seen_before = set() + L2 = [] + for i in L: + if i not in seen_before: + seen_before.add(i) + L2.append(i) + return L2 + + +def _get_tick_text_size(p): + for k in _dtextsortedkeys: + if p <= k: + return _dtextsizes[k] + + +def _get_tick_rotation(p): + for k in _drotationsortedkeys: + if p <= k: + return _drotation[k] + + +def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation, + no_labels, color_list, leaf_font_size=None, + leaf_rotation=None, contraction_marks=None, + ax=None, above_threshold_color='C0'): + # Import matplotlib here so that it's not imported unless dendrograms + # are plotted. Raise an informative error if importing fails. + try: + # if an axis is provided, don't use pylab at all + if ax is None: + import matplotlib.pylab + import matplotlib.patches + import matplotlib.collections + except ImportError as e: + raise ImportError("You must install the matplotlib library to plot " + "the dendrogram. 
Use no_plot=True to calculate the " + "dendrogram without plotting.") from e + + if ax is None: + ax = matplotlib.pylab.gca() + # if we're using pylab, we want to trigger a draw at the end + trigger_redraw = True + else: + trigger_redraw = False + + # Independent variable plot width + ivw = len(ivl) * 10 + # Dependent variable plot height + dvw = mh + mh * 0.05 + + iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10) + if orientation in ('top', 'bottom'): + if orientation == 'top': + ax.set_ylim([0, dvw]) + ax.set_xlim([0, ivw]) + else: + ax.set_ylim([dvw, 0]) + ax.set_xlim([0, ivw]) + + xlines = icoords + ylines = dcoords + if no_labels: + ax.set_xticks([]) + ax.set_xticklabels([]) + else: + ax.set_xticks(iv_ticks) + + if orientation == 'top': + ax.xaxis.set_ticks_position('bottom') + else: + ax.xaxis.set_ticks_position('top') + + # Make the tick marks invisible because they cover up the links + for line in ax.get_xticklines(): + line.set_visible(False) + + leaf_rot = (float(_get_tick_rotation(len(ivl))) + if (leaf_rotation is None) else leaf_rotation) + leaf_font = (float(_get_tick_text_size(len(ivl))) + if (leaf_font_size is None) else leaf_font_size) + ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font) + + elif orientation in ('left', 'right'): + if orientation == 'left': + ax.set_xlim([dvw, 0]) + ax.set_ylim([0, ivw]) + else: + ax.set_xlim([0, dvw]) + ax.set_ylim([0, ivw]) + + xlines = dcoords + ylines = icoords + if no_labels: + ax.set_yticks([]) + ax.set_yticklabels([]) + else: + ax.set_yticks(iv_ticks) + + if orientation == 'left': + ax.yaxis.set_ticks_position('right') + else: + ax.yaxis.set_ticks_position('left') + + # Make the tick marks invisible because they cover up the links + for line in ax.get_yticklines(): + line.set_visible(False) + + leaf_font = (float(_get_tick_text_size(len(ivl))) + if (leaf_font_size is None) else leaf_font_size) + + if leaf_rotation is not None: + ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font) + else: + ax.set_yticklabels(ivl, size=leaf_font) + + # Let's use collections instead. This way there is a separate legend item + # for each tree grouping, rather than stupidly one for each line segment. + colors_used = _remove_dups(color_list) + color_to_lines = {} + for color in colors_used: + color_to_lines[color] = [] + for (xline, yline, color) in zip(xlines, ylines, color_list): + color_to_lines[color].append(list(zip(xline, yline))) + + colors_to_collections = {} + # Construct the collections. + for color in colors_used: + coll = matplotlib.collections.LineCollection(color_to_lines[color], + colors=(color,)) + colors_to_collections[color] = coll + + # Add all the groupings below the color threshold. + for color in colors_used: + if color != above_threshold_color: + ax.add_collection(colors_to_collections[color]) + # If there's a grouping of links above the color threshold, it goes last. 
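+    # (Adding the above-threshold collection last draws it on top of the
+    # below-threshold groupings, since matplotlib paints later artists over
+    # earlier ones at equal zorder.)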
+ if above_threshold_color in colors_to_collections: + ax.add_collection(colors_to_collections[above_threshold_color]) + + if contraction_marks is not None: + Ellipse = matplotlib.patches.Ellipse + for (x, y) in contraction_marks: + if orientation in ('left', 'right'): + e = Ellipse((y, x), width=dvw / 100, height=1.0) + else: + e = Ellipse((x, y), width=1.0, height=dvw / 100) + ax.add_artist(e) + e.set_clip_box(ax.bbox) + e.set_alpha(0.5) + e.set_facecolor('k') + + if trigger_redraw: + matplotlib.pylab.draw_if_interactive() + + +# C0 is used for above threshold color +_link_line_colors_default = ('C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9') +_link_line_colors = list(_link_line_colors_default) + + +def set_link_color_palette(palette): + """ + Set list of matplotlib color codes for use by dendrogram. + + Note that this palette is global (i.e., setting it once changes the colors + for all subsequent calls to `dendrogram`) and that it affects only the + the colors below ``color_threshold``. + + Note that `dendrogram` also accepts a custom coloring function through its + ``link_color_func`` keyword, which is more flexible and non-global. + + Parameters + ---------- + palette : list of str or None + A list of matplotlib color codes. The order of the color codes is the + order in which the colors are cycled through when color thresholding in + the dendrogram. + + If ``None``, resets the palette to its default (which are matplotlib + default colors C1 to C9). + + Returns + ------- + None + + See Also + -------- + dendrogram + + Notes + ----- + Ability to reset the palette with ``None`` added in SciPy 0.17.0. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster import hierarchy + >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., + ... 400., 754., 564., 138., 219., 869., 669.]) + >>> Z = hierarchy.linkage(ytdist, 'single') + >>> dn = hierarchy.dendrogram(Z, no_plot=True) + >>> dn['color_list'] + ['C1', 'C0', 'C0', 'C0', 'C0'] + >>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k']) + >>> dn = hierarchy.dendrogram(Z, no_plot=True, above_threshold_color='b') + >>> dn['color_list'] + ['c', 'b', 'b', 'b', 'b'] + >>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267, + ... above_threshold_color='k') + >>> dn['color_list'] + ['c', 'm', 'm', 'k', 'k'] + + Now, reset the color palette to its default: + + >>> hierarchy.set_link_color_palette(None) + + """ + if palette is None: + # reset to its default + palette = _link_line_colors_default + elif not isinstance(palette, (list, tuple)): + raise TypeError("palette must be a list or tuple") + _ptypes = [isinstance(p, str) for p in palette] + + if False in _ptypes: + raise TypeError("all palette list elements must be color strings") + + global _link_line_colors + _link_line_colors = palette + + +def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None, + get_leaves=True, orientation='top', labels=None, + count_sort=False, distance_sort=False, show_leaf_counts=True, + no_plot=False, no_labels=False, leaf_font_size=None, + leaf_rotation=None, leaf_label_func=None, + show_contracted=False, link_color_func=None, ax=None, + above_threshold_color='C0'): + """ + Plot the hierarchical clustering as a dendrogram. + + The dendrogram illustrates how each cluster is + composed by drawing a U-shaped link between a non-singleton + cluster and its children. The top of the U-link indicates a + cluster merge. The two legs of the U-link indicate which clusters + were merged. 
The length of the two legs of the U-link represents
+    the distance between the child clusters. It is also the
+    cophenetic distance between original observations in the two
+    children clusters.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The linkage matrix encoding the hierarchical clustering to
+        render as a dendrogram. See the ``linkage`` function for more
+        information on the format of ``Z``.
+    p : int, optional
+        The ``p`` parameter for ``truncate_mode``.
+    truncate_mode : str, optional
+        The dendrogram can be hard to read when the original
+        observation matrix from which the linkage is derived is
+        large. Truncation is used to condense the dendrogram. There
+        are several modes:
+
+        ``None``
+          No truncation is performed (default).
+          Note: ``'none'`` is an alias for ``None`` that's kept for
+          backward compatibility.
+
+        ``'lastp'``
+          The last ``p`` non-singleton clusters formed in the linkage are the
+          only non-leaf nodes in the linkage; they correspond to rows
+          ``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
+          contracted into leaf nodes.
+
+        ``'level'``
+          No more than ``p`` levels of the dendrogram tree are displayed.
+          A "level" includes all nodes with ``p`` merges from the final merge.
+
+          Note: ``'mtica'`` is an alias for ``'level'`` that's kept for
+          backward compatibility.
+
+    color_threshold : double, optional
+        For brevity, let :math:`t` be the ``color_threshold``.
+        Colors all the descendent links below a cluster node
+        :math:`k` the same color if :math:`k` is the first node below
+        the cut threshold :math:`t`. All links connecting nodes with
+        distances greater than or equal to the threshold are colored
+        with the default matplotlib color ``'C0'``. If :math:`t` is less
+        than or equal to zero, all nodes are colored ``'C0'``.
+        If ``color_threshold`` is None or 'default',
+        corresponding with MATLAB(TM) behavior, the threshold is set to
+        ``0.7*max(Z[:,2])``.
+
+    get_leaves : bool, optional
+        Includes a list ``R['leaves']=H`` in the result
+        dictionary. For each :math:`i`, ``H[i] == j``, cluster node
+        ``j`` appears in position ``i`` in the left-to-right traversal
+        of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
+    orientation : str, optional
+        The direction to plot the dendrogram, which can be any
+        of the following strings:
+
+        ``'top'``
+          Plots the root at the top, and plots descendent links going
+          downwards (default).
+
+        ``'bottom'``
+          Plots the root at the bottom, and plots descendent links going
+          upwards.
+
+        ``'left'``
+          Plots the root at the left, and plots descendent links going right.
+
+        ``'right'``
+          Plots the root at the right, and plots descendent links going left.
+
+    labels : ndarray, optional
+        By default, ``labels`` is None so the index of the original observation
+        is used to label the leaf nodes. Otherwise, this is an :math:`n`-sized
+        sequence, with ``n == Z.shape[0] + 1``. The ``labels[i]`` value is the
+        text to put under the :math:`i` th leaf node only if it corresponds to
+        an original observation and not a non-singleton cluster.
+    count_sort : str or bool, optional
+        For each node n, the order (visually, from left-to-right) in which n's
+        two descendent links are plotted is determined by this
+        parameter, which can be any of the following values:
+
+        ``False``
+          Nothing is done.
+
+        ``'ascending'`` or ``True``
+          The child with the minimum number of original objects in its cluster
+          is plotted first.
+
+        ``'descending'``
+          The child with the maximum number of original objects in its cluster
+          is plotted first.
+ + Note, ``distance_sort`` and ``count_sort`` cannot both be True. + distance_sort : str or bool, optional + For each node n, the order (visually, from left-to-right) n's + two descendent links are plotted is determined by this + parameter, which can be any of the following values: + + ``False`` + Nothing is done. + + ``'ascending'`` or ``True`` + The child with the minimum distance between its direct descendents is + plotted first. + + ``'descending'`` + The child with the maximum distance between its direct descendents is + plotted first. + + Note ``distance_sort`` and ``count_sort`` cannot both be True. + show_leaf_counts : bool, optional + When True, leaf nodes representing :math:`k>1` original + observation are labeled with the number of observations they + contain in parentheses. + no_plot : bool, optional + When True, the final rendering is not performed. This is + useful if only the data structures computed for the rendering + are needed or if matplotlib is not available. + no_labels : bool, optional + When True, no labels appear next to the leaf nodes in the + rendering of the dendrogram. + leaf_rotation : double, optional + Specifies the angle (in degrees) to rotate the leaf + labels. When unspecified, the rotation is based on the number of + nodes in the dendrogram (default is 0). + leaf_font_size : int, optional + Specifies the font size (in points) of the leaf labels. When + unspecified, the size based on the number of nodes in the + dendrogram. + leaf_label_func : lambda or function, optional + When ``leaf_label_func`` is a callable function, for each + leaf with cluster index :math:`k < 2n-1`. The function + is expected to return a string with the label for the + leaf. + + Indices :math:`k < n` correspond to original observations + while indices :math:`k \\geq n` correspond to non-singleton + clusters. + + For example, to label singletons with their node id and + non-singletons with their id, count, and inconsistency + coefficient, simply do:: + + # First define the leaf label function. + def llf(id): + if id < n: + return str(id) + else: + return '[%d %d %1.2f]' % (id, count, R[n-id,3]) + + # The text for the leaf nodes is going to be big so force + # a rotation of 90 degrees. + dendrogram(Z, leaf_label_func=llf, leaf_rotation=90) + + # leaf_label_func can also be used together with ``truncate_mode``, + # in which case you will get your leaves labeled after truncation: + dendrogram(Z, leaf_label_func=llf, leaf_rotation=90, + truncate_mode='level', p=2) + + show_contracted : bool, optional + When True the heights of non-singleton nodes contracted + into a leaf node are plotted as crosses along the link + connecting that leaf node. This really is only useful when + truncation is used (see ``truncate_mode`` parameter). + link_color_func : callable, optional + If given, `link_color_function` is called with each non-singleton id + corresponding to each U-shaped link it will paint. The function is + expected to return the color to paint the link, encoded as a matplotlib + color string code. For example:: + + dendrogram(Z, link_color_func=lambda k: colors[k]) + + colors the direct links below each untruncated non-singleton node + ``k`` using ``colors[k]``. + ax : matplotlib Axes instance, optional + If None and `no_plot` is not True, the dendrogram will be plotted + on the current axes. Otherwise if `no_plot` is not True the + dendrogram will be plotted on the given ``Axes`` instance. This can be + useful if the dendrogram is part of a more complex figure. 
+ above_threshold_color : str, optional + This matplotlib color string sets the color of the links above the + color_threshold. The default is ``'C0'``. + + Returns + ------- + R : dict + A dictionary of data structures computed to render the + dendrogram. Its has the following keys: + + ``'color_list'`` + A list of color names. The k'th element represents the color of the + k'th link. + + ``'icoord'`` and ``'dcoord'`` + Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]`` + where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]`` + where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is + ``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``. + + ``'ivl'`` + A list of labels corresponding to the leaf nodes. + + ``'leaves'`` + For each i, ``H[i] == j``, cluster node ``j`` appears in position + ``i`` in the left-to-right traversal of the leaves, where + :math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the + ``i``-th leaf node corresponds to an original observation. + Otherwise, it corresponds to a non-singleton cluster. + + ``'leaves_color_list'`` + A list of color names. The k'th element represents the color of the + k'th leaf. + + See Also + -------- + linkage, set_link_color_palette + + Notes + ----- + It is expected that the distances in ``Z[:,2]`` be monotonic, otherwise + crossings appear in the dendrogram. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster import hierarchy + >>> import matplotlib.pyplot as plt + + A very basic example: + + >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., + ... 400., 754., 564., 138., 219., 869., 669.]) + >>> Z = hierarchy.linkage(ytdist, 'single') + >>> plt.figure() + >>> dn = hierarchy.dendrogram(Z) + + Now, plot in given axes, improve the color scheme and use both vertical and + horizontal orientations: + + >>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k']) + >>> fig, axes = plt.subplots(1, 2, figsize=(8, 3)) + >>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y', + ... orientation='top') + >>> dn2 = hierarchy.dendrogram(Z, ax=axes[1], + ... above_threshold_color='#bcbddc', + ... orientation='right') + >>> hierarchy.set_link_color_palette(None) # reset to default after use + >>> plt.show() + + """ + # This feature was thought about but never implemented (still useful?): + # + # ... = dendrogram(..., leaves_order=None) + # + # Plots the leaves in the order specified by a vector of + # original observation indices. If the vector contains duplicates + # or results in a crossing, an exception will be thrown. Passing + # None orders leaf nodes based on the order they appear in the + # pre-order traversal. + xp = array_namespace(Z) + Z = _asarray(Z, order='c', xp=xp) + + if orientation not in ["top", "left", "bottom", "right"]: + raise ValueError("orientation must be one of 'top', 'left', " + "'bottom', or 'right'") + + if labels is not None: + try: + len_labels = len(labels) + except (TypeError, AttributeError): + len_labels = labels.shape[0] + if Z.shape[0] + 1 != len_labels: + raise ValueError("Dimensions of Z and labels must be consistent.") + + is_valid_linkage(Z, throw=True, name='Z') + Zs = Z.shape + n = Zs[0] + 1 + if isinstance(p, (int, float)): + p = int(p) + else: + raise TypeError('The second argument must be a number') + + if truncate_mode not in ('lastp', 'mtica', 'level', 'none', None): + # 'mtica' is kept working for backwards compat. 
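+        # (When 'mtica' passes this check, it is remapped to 'level' a few
+        # lines below.)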
+ raise ValueError('Invalid truncation mode.') + + if truncate_mode == 'lastp': + if p > n or p == 0: + p = n + + if truncate_mode == 'mtica': + # 'mtica' is an alias + truncate_mode = 'level' + + if truncate_mode == 'level': + if p <= 0: + p = np.inf + + if get_leaves: + lvs = [] + else: + lvs = None + + icoord_list = [] + dcoord_list = [] + color_list = [] + current_color = [0] + currently_below_threshold = [False] + ivl = [] # list of leaves + + if color_threshold is None or (isinstance(color_threshold, str) and + color_threshold == 'default'): + color_threshold = max(Z[:, 2]) * 0.7 + + R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl, + 'leaves': lvs, 'color_list': color_list} + + # Empty list will be filled in _dendrogram_calculate_info + contraction_marks = [] if show_contracted else None + + _dendrogram_calculate_info( + Z=Z, p=p, + truncate_mode=truncate_mode, + color_threshold=color_threshold, + get_leaves=get_leaves, + orientation=orientation, + labels=labels, + count_sort=count_sort, + distance_sort=distance_sort, + show_leaf_counts=show_leaf_counts, + i=2*n - 2, + iv=0.0, + ivl=ivl, + n=n, + icoord_list=icoord_list, + dcoord_list=dcoord_list, + lvs=lvs, + current_color=current_color, + color_list=color_list, + currently_below_threshold=currently_below_threshold, + leaf_label_func=leaf_label_func, + contraction_marks=contraction_marks, + link_color_func=link_color_func, + above_threshold_color=above_threshold_color) + + if not no_plot: + mh = max(Z[:, 2]) + _plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation, + no_labels, color_list, + leaf_font_size=leaf_font_size, + leaf_rotation=leaf_rotation, + contraction_marks=contraction_marks, + ax=ax, + above_threshold_color=above_threshold_color) + + R["leaves_color_list"] = _get_leaves_color_list(R) + + return R + + +def _get_leaves_color_list(R): + leaves_color_list = [None] * len(R['leaves']) + for link_x, link_y, link_color in zip(R['icoord'], + R['dcoord'], + R['color_list']): + for (xi, yi) in zip(link_x, link_y): + if yi == 0.0 and (xi % 5 == 0 and xi % 2 == 1): + # if yi is 0.0 and xi is divisible by 5 and odd, + # the point is a leaf + # xi of leaves are 5, 15, 25, 35, ... (see `iv_ticks`) + # index of leaves are 0, 1, 2, 3, ... as below + leaf_index = (int(xi) - 5) // 10 + # each leaf has a same color of its link. + leaves_color_list[leaf_index] = link_color + return leaves_color_list + + +def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, + i, labels): + # If the leaf id structure is not None and is a list then the caller + # to dendrogram has indicated that cluster id's corresponding to the + # leaf nodes should be recorded. + + if lvs is not None: + lvs.append(int(i)) + + # If leaf node labels are to be displayed... + if ivl is not None: + # If a leaf_label_func has been provided, the label comes from the + # string returned from the leaf_label_func, which is a function + # passed to dendrogram. + if leaf_label_func: + ivl.append(leaf_label_func(int(i))) + else: + # Otherwise, if the dendrogram caller has passed a labels list + # for the leaf nodes, use it. 
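+            # (Note: i < n for singleton leaves, so labels[int(i - n)] uses a
+            # negative index; for a length-n labels sequence it is labels[i].)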
+            if labels is not None:
+                ivl.append(labels[int(i - n)])
+            else:
+                # Otherwise, use the id as the label for the leaf.
+                ivl.append(str(int(i)))
+
+
+def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
+                                   i, labels, show_leaf_counts):
+    # If the leaf id structure is not None and is a list then the caller
+    # to dendrogram has indicated that cluster id's corresponding to the
+    # leaf nodes should be recorded.
+
+    if lvs is not None:
+        lvs.append(int(i))
+    if ivl is not None:
+        if leaf_label_func:
+            ivl.append(leaf_label_func(int(i)))
+        else:
+            if show_leaf_counts:
+                ivl.append("(" + str(np.asarray(Z[i - n, 3], dtype=np.int64)) + ")")
+            else:
+                ivl.append("")
+
+
+def _append_contraction_marks(Z, iv, i, n, contraction_marks, xp):
+    _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 0], xp),
+                                  n, contraction_marks, xp)
+    _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 1], xp),
+                                  n, contraction_marks, xp)
+
+
+def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks, xp):
+    if i >= n:
+        contraction_marks.append((iv, Z[i - n, 2]))
+        _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 0], xp),
+                                      n, contraction_marks, xp)
+        _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 1], xp),
+                                      n, contraction_marks, xp)
+
+
+def _dendrogram_calculate_info(Z, p, truncate_mode,
+                               color_threshold=np.inf, get_leaves=True,
+                               orientation='top', labels=None,
+                               count_sort=False, distance_sort=False,
+                               show_leaf_counts=False, i=-1, iv=0.0,
+                               ivl=[], n=0, icoord_list=[], dcoord_list=[],
+                               lvs=None, mhr=False,
+                               current_color=[], color_list=[],
+                               currently_below_threshold=[],
+                               leaf_label_func=None, level=0,
+                               contraction_marks=None,
+                               link_color_func=None,
+                               above_threshold_color='C0'):
+    """
+    Calculate the endpoints of the links as well as the labels for
+    the dendrogram rooted at the node with index i. iv is the independent
+    variable value to plot the left-most leaf node below the root node i
+    (if orientation='top', this would be the left-most x value where the
+    plotting of this root node i and its descendents should begin).
+
+    ivl is a list to store the labels of the leaf nodes. The leaf_label_func
+    is called whenever ivl != None, labels == None, and
+    leaf_label_func != None. When ivl != None and labels != None, the
+    labels list is used only for labeling the leaf nodes. When
+    ivl == None, no labels are generated for leaf nodes.
+
+    When get_leaves==True, a list of leaves is built as they are visited
+    in the dendrogram.
+
+    Returns a tuple with l being the independent variable coordinate that
+    corresponds to the midpoint of the cluster to the left of cluster i if
+    i is non-singleton, otherwise the independent coordinate of the leaf
+    node if i is a leaf node.
+
+    Returns
+    -------
+    A tuple (left, w, h, md), where:
+        * left is the independent variable coordinate of the center of
+          the U of the subtree
+
+        * w is the amount of space used for the subtree (in independent
+          variable units)
+
+        * h is the height of the subtree in dependent variable units
+
+        * md is the ``max(Z[*,2])`` for all nodes ``*`` below and including
+          the target node.
+
+    """
+    xp = array_namespace(Z)
+    if n == 0:
+        raise ValueError("Invalid singleton cluster count n.")
+
+    if i == -1:
+        raise ValueError("Invalid root cluster index i.")
+
+    if truncate_mode == 'lastp':
+        # If the node is a leaf node but corresponds to a non-singleton
+        # cluster, its label is either the empty string or the number of
+        # original observations belonging to cluster i.
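+        # (Non-singleton rows are numbered n..2n-2, so ids in [n, 2n-p) are
+        # precisely the ones contracted into leaf nodes by this branch.)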
+ if 2*n - p > i >= n: + d = Z[i - n, 2] + _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels, + show_leaf_counts) + if contraction_marks is not None: + _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks, xp) + return (iv + 5.0, 10.0, 0.0, d) + elif i < n: + _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels) + return (iv + 5.0, 10.0, 0.0, 0.0) + elif truncate_mode == 'level': + if i > n and level > p: + d = Z[i - n, 2] + _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels, + show_leaf_counts) + if contraction_marks is not None: + _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks, xp) + return (iv + 5.0, 10.0, 0.0, d) + elif i < n: + _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels) + return (iv + 5.0, 10.0, 0.0, 0.0) + + # Otherwise, only truncate if we have a leaf node. + # + # Only place leaves if they correspond to original observations. + if i < n: + _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels) + return (iv + 5.0, 10.0, 0.0, 0.0) + + # !!! Otherwise, we don't have a leaf node, so work on plotting a + # non-leaf node. + # Actual indices of a and b + aa = int_floor(Z[i - n, 0], xp) + ab = int_floor(Z[i - n, 1], xp) + if aa >= n: + # The number of singletons below cluster a + na = Z[aa - n, 3] + # The distance between a's two direct children. + da = Z[aa - n, 2] + else: + na = 1 + da = 0.0 + if ab >= n: + nb = Z[ab - n, 3] + db = Z[ab - n, 2] + else: + nb = 1 + db = 0.0 + + if count_sort == 'ascending' or count_sort is True: + # If a has a count greater than b, it and its descendents should + # be drawn to the right. Otherwise, to the left. + if na > nb: + # The cluster index to draw to the left (ua) will be ab + # and the one to draw to the right (ub) will be aa + ua = ab + ub = aa + else: + ua = aa + ub = ab + elif count_sort == 'descending': + # If a has a count less than or equal to b, it and its + # descendents should be drawn to the left. Otherwise, to + # the right. + if na > nb: + ua = aa + ub = ab + else: + ua = ab + ub = aa + elif distance_sort == 'ascending' or distance_sort is True: + # If a has a distance greater than b, it and its descendents should + # be drawn to the right. Otherwise, to the left. + if da > db: + ua = ab + ub = aa + else: + ua = aa + ub = ab + elif distance_sort == 'descending': + # If a has a distance less than or equal to b, it and its + # descendents should be drawn to the left. Otherwise, to + # the right. + if da > db: + ua = aa + ub = ab + else: + ua = ab + ub = aa + else: + ua = aa + ub = ab + + # Updated iv variable and the amount of space used. 
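+    # (The left subtree, rooted at ua, is laid out first; its returned width
+    # uwa then offsets the right subtree, rooted at ub, at iv + uwa below.)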
+ (uiva, uwa, uah, uamd) = \ + _dendrogram_calculate_info( + Z=Z, p=p, + truncate_mode=truncate_mode, + color_threshold=color_threshold, + get_leaves=get_leaves, + orientation=orientation, + labels=labels, + count_sort=count_sort, + distance_sort=distance_sort, + show_leaf_counts=show_leaf_counts, + i=ua, iv=iv, ivl=ivl, n=n, + icoord_list=icoord_list, + dcoord_list=dcoord_list, lvs=lvs, + current_color=current_color, + color_list=color_list, + currently_below_threshold=currently_below_threshold, + leaf_label_func=leaf_label_func, + level=level + 1, contraction_marks=contraction_marks, + link_color_func=link_color_func, + above_threshold_color=above_threshold_color) + + h = Z[i - n, 2] + if h >= color_threshold or color_threshold <= 0: + c = above_threshold_color + + if currently_below_threshold[0]: + current_color[0] = (current_color[0] + 1) % len(_link_line_colors) + currently_below_threshold[0] = False + else: + currently_below_threshold[0] = True + c = _link_line_colors[current_color[0]] + + (uivb, uwb, ubh, ubmd) = \ + _dendrogram_calculate_info( + Z=Z, p=p, + truncate_mode=truncate_mode, + color_threshold=color_threshold, + get_leaves=get_leaves, + orientation=orientation, + labels=labels, + count_sort=count_sort, + distance_sort=distance_sort, + show_leaf_counts=show_leaf_counts, + i=ub, iv=iv + uwa, ivl=ivl, n=n, + icoord_list=icoord_list, + dcoord_list=dcoord_list, lvs=lvs, + current_color=current_color, + color_list=color_list, + currently_below_threshold=currently_below_threshold, + leaf_label_func=leaf_label_func, + level=level + 1, contraction_marks=contraction_marks, + link_color_func=link_color_func, + above_threshold_color=above_threshold_color) + + max_dist = max(uamd, ubmd, h) + + icoord_list.append([uiva, uiva, uivb, uivb]) + dcoord_list.append([uah, h, h, ubh]) + if link_color_func is not None: + v = link_color_func(int(i)) + if not isinstance(v, str): + raise TypeError("link_color_func must return a matplotlib " + "color string!") + color_list.append(v) + else: + color_list.append(c) + + return (((uiva + uivb) / 2), uwa + uwb, h, max_dist) + + +def is_isomorphic(T1, T2): + """ + Determine if two different cluster assignments are equivalent. + + Parameters + ---------- + T1 : array_like + An assignment of singleton cluster ids to flat cluster ids. + T2 : array_like + An assignment of singleton cluster ids to flat cluster ids. + + Returns + ------- + b : bool + Whether the flat cluster assignments `T1` and `T2` are + equivalent. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + fcluster : for the creation of flat cluster assignments. + + Examples + -------- + >>> from scipy.cluster.hierarchy import fcluster, is_isomorphic + >>> from scipy.cluster.hierarchy import single, complete + >>> from scipy.spatial.distance import pdist + + Two flat cluster assignments can be isomorphic if they represent the same + cluster assignment, with different labels. + + For example, we can use the `scipy.cluster.hierarchy.single`: method + and flatten the output to four clusters: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... 
[4, 4], [3, 4], [4, 3]]
+
+    >>> Z = single(pdist(X))
+    >>> T = fcluster(Z, 1, criterion='distance')
+    >>> T
+    array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
+
+    We can then do the same using the
+    `scipy.cluster.hierarchy.complete` method:
+
+    >>> Z = complete(pdist(X))
+    >>> T_ = fcluster(Z, 1.5, criterion='distance')
+    >>> T_
+    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
+
+    As we can see, in both cases we obtain four clusters and all the data
+    points are distributed in the same way - the only things that change
+    are the flat cluster labels (3 => 1, 4 => 2, 2 => 3 and 1 => 4), so both
+    cluster assignments are isomorphic:
+
+    >>> is_isomorphic(T, T_)
+    True
+
+    """
+    T1 = np.asarray(T1, order='c')
+    T2 = np.asarray(T2, order='c')
+
+    T1S = T1.shape
+    T2S = T2.shape
+
+    if len(T1S) != 1:
+        raise ValueError('T1 must be one-dimensional.')
+    if len(T2S) != 1:
+        raise ValueError('T2 must be one-dimensional.')
+    if T1S[0] != T2S[0]:
+        raise ValueError('T1 and T2 must have the same number of elements.')
+    n = T1S[0]
+    d1 = {}
+    d2 = {}
+    for i in range(0, n):
+        if T1[i] in d1:
+            if T2[i] not in d2:
+                return False
+            if d1[T1[i]] != T2[i] or d2[T2[i]] != T1[i]:
+                return False
+        elif T2[i] in d2:
+            return False
+        else:
+            d1[T1[i]] = T2[i]
+            d2[T2[i]] = T1[i]
+    return True
+
+
+def maxdists(Z):
+    """
+    Return the maximum distance within each non-singleton cluster.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. See
+        ``linkage`` for more information.
+
+    Returns
+    -------
+    maxdists : ndarray
+        A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
+        the maximum distance between any cluster (including
+        singletons) below and including the node with index i. More
+        specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
+        set of all node indices below and including node i.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    is_monotonic : for testing for monotonicity of a linkage matrix.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import median, maxdists
+    >>> from scipy.spatial.distance import pdist
+
+    Given a linkage matrix ``Z``, `scipy.cluster.hierarchy.maxdists`
+    computes, for each new cluster generated (i.e., for each row of the
+    linkage matrix), the maximum distance between any two child clusters.
+
+    Due to the nature of hierarchical clustering, in many cases this is going
+    to be just the distance between the two child clusters that were merged
+    to form the current one - that is, Z[:,2].
+
+    However, for non-monotonic cluster assignments such as
+    `scipy.cluster.hierarchy.median` clustering, this is not always the
+    case: there may be cluster formations where the distance between the two
+    clusters merged is smaller than the distance between their children.
+
+    We can see this in an example:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = median(pdist(X))
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.11803399,  3.        ],
+           [ 5.        , 13.        ,  1.11803399,  3.        ],
+           [ 8.        , 15.        ,  1.11803399,  3.        ],
+           [11.        , 14.        ,  1.11803399,  3.        ],
+           [18.        , 19.        ,  3.        ,  6.        ],
+           [16.        , 17.        ,  3.5       ,  6.        ],
+           [20.        , 21.        ,  3.25      , 12.        ]])
+    >>> maxdists(Z)
+    array([1.        , 1.        , 1.        , 1.        , 1.11803399,
+           1.11803399, 1.11803399, 1.11803399, 3.        , 3.5       ,
+           3.5       ])
+
+    Note that while the distance between the two clusters merged when creating the
+    last cluster is 3.25, there are two children (clusters 16 and 17) whose distance
+    is larger (3.5). Thus, `scipy.cluster.hierarchy.maxdists` returns 3.5 in
+    this case.
+
+    """
+    xp = array_namespace(Z)
+    Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp)
+    is_valid_linkage(Z, throw=True, name='Z')
+
+    n = Z.shape[0] + 1
+    MD = np.zeros((n - 1,))
+    Z = np.asarray(Z)
+    _hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
+    MD = xp.asarray(MD)
+    return MD
+
+
+def maxinconsts(Z, R):
+    """
+    Return the maximum inconsistency coefficient for each
+    non-singleton cluster and its children.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. See
+        `linkage` for more information.
+    R : ndarray
+        The inconsistency matrix.
+
+    Returns
+    -------
+    MI : ndarray
+        A monotonic ``(n-1)``-sized numpy array of doubles.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    inconsistent : for the creation of an inconsistency matrix.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import median, inconsistent, maxinconsts
+    >>> from scipy.spatial.distance import pdist
+
+    Given a data set ``X``, we can apply a clustering method to obtain a
+    linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
+    also be used to obtain the inconsistency matrix ``R`` associated to
+    this clustering process:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = median(pdist(X))
+    >>> R = inconsistent(Z)
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.11803399,  3.        ],
+           [ 5.        , 13.        ,  1.11803399,  3.        ],
+           [ 8.        , 15.        ,  1.11803399,  3.        ],
+           [11.        , 14.        ,  1.11803399,  3.        ],
+           [18.        , 19.        ,  3.        ,  6.        ],
+           [16.        , 17.        ,  3.5       ,  6.        ],
+           [20.        , 21.        ,  3.25      , 12.        ]])
+    >>> R
+    array([[1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.74535599, 1.08655358, 3.        , 1.15470054],
+           [1.91202266, 1.37522872, 3.        , 1.15470054],
+           [3.25      , 0.25      , 3.        , 0.        ]])
+
+    Here, `scipy.cluster.hierarchy.maxinconsts` can be used to compute
+    the maximum value of the inconsistency statistic (the last column of
+    ``R``) for each non-singleton cluster and its children:
+
+    >>> maxinconsts(Z, R)
+    array([0.        , 0.        , 0.        , 0.        , 0.70710678,
+           0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,
+           1.15470054])
+
+    """
+    xp = array_namespace(Z, R)
+    Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp)
+    R = _asarray(R, order='C', dtype=xp.float64, xp=xp)
+    is_valid_linkage(Z, throw=True, name='Z')
+    is_valid_im(R, throw=True, name='R')
+
+    n = Z.shape[0] + 1
+    if Z.shape[0] != R.shape[0]:
+        raise ValueError("The inconsistency matrix and linkage matrix each "
+                         "have a different number of rows.")
+    MI = np.zeros((n - 1,))
+    Z = np.asarray(Z)
+    R = np.asarray(R)
+    _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
+    MI = xp.asarray(MI)
+    return MI
+
+
+def maxRstat(Z, R, i):
+    """
+    Return the maximum statistic for each non-singleton cluster and its
+    children.
+
+    Parameters
+    ----------
+    Z : array_like
+        The hierarchical clustering encoded as a matrix. See `linkage` for more
+        information.
+    R : array_like
+        The inconsistency matrix.
+    i : int
+        The column of `R` to use as the statistic.
+
+    Returns
+    -------
+    MR : ndarray
+        The maximum statistic for the i'th column of the
+        inconsistency matrix `R` for each non-singleton cluster
+        node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]``, where
+        ``Q(j)`` is the set of all node ids corresponding to nodes below
+        and including ``j``.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    inconsistent : for the creation of an inconsistency matrix.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import median, inconsistent, maxRstat
+    >>> from scipy.spatial.distance import pdist
+
+    Given a data set ``X``, we can apply a clustering method to obtain a
+    linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
+    also be used to obtain the inconsistency matrix ``R`` associated to
+    this clustering process:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = median(pdist(X))
+    >>> R = inconsistent(Z)
+    >>> R
+    array([[1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.74535599, 1.08655358, 3.        , 1.15470054],
+           [1.91202266, 1.37522872, 3.        , 1.15470054],
+           [3.25      , 0.25      , 3.        , 0.        ]])
+
+    `scipy.cluster.hierarchy.maxRstat` can be used to compute
+    the maximum value of each column of ``R``, for each non-singleton
+    cluster and its children:
+
+    >>> maxRstat(Z, R, 0)
+    array([1.        , 1.        , 1.        , 1.        , 1.05901699,
+           1.05901699, 1.05901699, 1.05901699, 1.74535599, 1.91202266,
+           3.25      ])
+    >>> maxRstat(Z, R, 1)
+    array([0.        , 0.        , 0.        , 0.        , 0.08346263,
+           0.08346263, 0.08346263, 0.08346263, 1.08655358, 1.37522872,
+           1.37522872])
+    >>> maxRstat(Z, R, 3)
+    array([0.        , 0.        , 0.        , 0.        , 0.70710678,
+           0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,
+           1.15470054])
+
+    """
+    xp = array_namespace(Z, R)
+    Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp)
+    R = _asarray(R, order='C', dtype=xp.float64, xp=xp)
+    is_valid_linkage(Z, throw=True, name='Z')
+    is_valid_im(R, throw=True, name='R')
+
+    if not isinstance(i, int):
+        raise TypeError('The third argument must be an integer.')
+
+    if i < 0 or i > 3:
+        raise ValueError('i must be an integer between 0 and 3 inclusive.')
+
+    if Z.shape[0] != R.shape[0]:
+        raise ValueError("The inconsistency matrix and linkage matrix each "
+                         "have a different number of rows.")
+
+    n = Z.shape[0] + 1
+    MR = np.zeros((n - 1,))
+    Z = np.asarray(Z)
+    R = np.asarray(R)
+    _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
+    MR = xp.asarray(MR)
+    return MR
+
+
+def leaders(Z, T):
+    """
+    Return the root nodes in a hierarchical clustering.
+
+    Returns the root nodes in a hierarchical clustering corresponding
+    to a cut defined by a flat cluster assignment vector ``T``. See
+    the ``fcluster`` function for more information on the format of ``T``.
+
+    For each flat cluster :math:`j` of the :math:`k` flat clusters
+    represented in the n-sized flat cluster assignment vector ``T``,
+    this function finds the lowest cluster node :math:`i` in the linkage
+    tree Z, such that:
+
+      * leaf descendants belong only to flat cluster j
+        (i.e., ``T[p]==j`` for all :math:`p` in :math:`S(i)`, where
+        :math:`S(i)` is the set of leaf ids of the descendant leaf nodes
+        of cluster node :math:`i`)
+
+      * there does not exist a leaf that is not a descendant of
+        :math:`i` that also belongs to cluster :math:`j`
+        (i.e., ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
+        this condition is violated, ``T`` is not a valid cluster
+        assignment vector, and an exception will be thrown.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. See
+        `linkage` for more information.
+    T : ndarray
+        The flat cluster assignment vector.
+
+    Returns
+    -------
+    L : ndarray
+        The leader linkage node ids stored as a k-element 1-D array,
+        where ``k`` is the number of flat clusters found in ``T``.
+
+        ``L[j]=i`` is the linkage cluster node id that is the
+        leader of the flat cluster with id ``M[j]``. If ``i < n``, ``i``
+        corresponds to an original observation, otherwise it
+        corresponds to a non-singleton cluster.
+    M : ndarray
+        The flat cluster ids stored as a k-element 1-D array, where
+        ``k`` is the number of flat clusters found in ``T``. This allows the
+        set of flat cluster ids to be any arbitrary set of ``k`` integers.
+
+        For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
+        id 8's leader is linkage node 2.
+
+    See Also
+    --------
+    fcluster : for the creation of flat cluster assignments.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, fcluster, leaders
+    >>> from scipy.spatial.distance import pdist
+
+    Given a linkage matrix ``Z`` - obtained after applying a clustering method
+    to a dataset ``X`` - and a flat cluster assignment array ``T``:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.29099445,  3.        ],
+           [ 5.        , 13.        ,  1.29099445,  3.        ],
+           [ 8.        , 14.        ,  1.29099445,  3.        ],
+           [11.        , 15.        ,  1.29099445,  3.        ],
+           [16.        , 17.        ,  5.77350269,  6.        ],
+           [18.        , 19.        ,  5.77350269,  6.        ],
+           [20.        , 21.        ,  8.16496581, 12.
]]) + + >>> T = fcluster(Z, 3, criterion='distance') + >>> T + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + + `scipy.cluster.hierarchy.leaders` returns the indices of the nodes + in the dendrogram that are the leaders of each flat cluster: + + >>> L, M = leaders(Z, T) + >>> L + array([16, 17, 18, 19], dtype=int32) + + (remember that indices 0-11 point to the 12 data points in ``X``, + whereas indices 12-22 point to the 11 rows of ``Z``) + + `scipy.cluster.hierarchy.leaders` also returns the indices of + the flat clusters in ``T``: + + >>> M + array([1, 2, 3, 4], dtype=int32) + + """ + xp = array_namespace(Z, T) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + T = _asarray(T, order='C', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + if T.dtype != xp.int32: + raise TypeError('T must be a 1-D array of dtype int32.') + + if T.shape[0] != Z.shape[0] + 1: + raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.') + + n_clusters = int(xp.unique_values(T).shape[0]) + n_obs = int(Z.shape[0] + 1) + L = np.zeros(n_clusters, dtype=np.int32) + M = np.zeros(n_clusters, dtype=np.int32) + Z = np.asarray(Z) + T = np.asarray(T, dtype=np.int32) + s = _hierarchy.leaders(Z, T, L, M, n_clusters, n_obs) + if s >= 0: + raise ValueError(('T is not a valid assignment vector. Error found ' + 'when examining linkage node %d (< 2n-1).') % s) + L, M = xp.asarray(L), xp.asarray(M) + return (L, M) diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py b/venv/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py new file mode 100644 index 0000000000000000000000000000000000000000..7d874ca5eb7141a44559307d1c28dd412171396f --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py @@ -0,0 +1,145 @@ +from numpy import array + + +Q_X = array([[5.26563660e-01, 3.14160190e-01, 8.00656370e-02], + [7.50205180e-01, 4.60299830e-01, 8.98696460e-01], + [6.65461230e-01, 6.94011420e-01, 9.10465700e-01], + [9.64047590e-01, 1.43082200e-03, 7.39874220e-01], + [1.08159060e-01, 5.53028790e-01, 6.63804780e-02], + [9.31359130e-01, 8.25424910e-01, 9.52315440e-01], + [6.78086960e-01, 3.41903970e-01, 5.61481950e-01], + [9.82730940e-01, 7.04605210e-01, 8.70978630e-02], + [6.14691610e-01, 4.69989230e-02, 6.02406450e-01], + [5.80161260e-01, 9.17354970e-01, 5.88163850e-01], + [1.38246310e+00, 1.96358160e+00, 1.94437880e+00], + [2.10675860e+00, 1.67148730e+00, 1.34854480e+00], + [1.39880070e+00, 1.66142050e+00, 1.32224550e+00], + [1.71410460e+00, 1.49176380e+00, 1.45432170e+00], + [1.54102340e+00, 1.84374950e+00, 1.64658950e+00], + [2.08512480e+00, 1.84524350e+00, 2.17340850e+00], + [1.30748740e+00, 1.53801650e+00, 2.16007740e+00], + [1.41447700e+00, 1.99329070e+00, 1.99107420e+00], + [1.61943490e+00, 1.47703280e+00, 1.89788160e+00], + [1.59880600e+00, 1.54988980e+00, 1.57563350e+00], + [3.37247380e+00, 2.69635310e+00, 3.39981700e+00], + [3.13705120e+00, 3.36528090e+00, 3.06089070e+00], + [3.29413250e+00, 3.19619500e+00, 2.90700170e+00], + [2.65510510e+00, 3.06785900e+00, 2.97198540e+00], + [3.30941040e+00, 2.59283970e+00, 2.57714110e+00], + [2.59557220e+00, 3.33477370e+00, 3.08793190e+00], + [2.58206180e+00, 3.41615670e+00, 3.26441990e+00], + [2.71127000e+00, 2.77032450e+00, 2.63466500e+00], + [2.79617850e+00, 3.25473720e+00, 3.41801560e+00], + [2.64741750e+00, 2.54538040e+00, 3.25354110e+00]]) + +ytdist = array([662., 877., 255., 412., 996., 295., 468., 268., 400., 754., + 564., 138., 219., 869., 669.]) + +linkage_ytdist_single = 
array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 255., 3.], + [1., 8., 268., 4.], + [6., 9., 295., 6.]]) + +linkage_ytdist_complete = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [1., 6., 400., 3.], + [0., 7., 412., 3.], + [8., 9., 996., 6.]]) + +linkage_ytdist_average = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 333.5, 3.], + [1., 6., 347.5, 3.], + [8., 9., 680.77777778, 6.]]) + +linkage_ytdist_weighted = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 333.5, 3.], + [1., 6., 347.5, 3.], + [8., 9., 670.125, 6.]]) + +# the optimal leaf ordering of linkage_ytdist_single +linkage_ytdist_single_olo = array([[5., 2., 138., 2.], + [4., 3., 219., 2.], + [7., 0., 255., 3.], + [1., 8., 268., 4.], + [6., 9., 295., 6.]]) + +X = array([[1.43054825, -7.5693489], + [6.95887839, 6.82293382], + [2.87137846, -9.68248579], + [7.87974764, -6.05485803], + [8.24018364, -6.09495602], + [7.39020262, 8.54004355]]) + +linkage_X_centroid = array([[3., 4., 0.36265956, 2.], + [1., 5., 1.77045373, 2.], + [0., 2., 2.55760419, 2.], + [6., 8., 6.43614494, 4.], + [7., 9., 15.17363237, 6.]]) + +linkage_X_median = array([[3., 4., 0.36265956, 2.], + [1., 5., 1.77045373, 2.], + [0., 2., 2.55760419, 2.], + [6., 8., 6.43614494, 4.], + [7., 9., 15.17363237, 6.]]) + +linkage_X_ward = array([[3., 4., 0.36265956, 2.], + [1., 5., 1.77045373, 2.], + [0., 2., 2.55760419, 2.], + [6., 8., 9.10208346, 4.], + [7., 9., 24.7784379, 6.]]) + +# the optimal leaf ordering of linkage_X_ward +linkage_X_ward_olo = array([[4., 3., 0.36265956, 2.], + [5., 1., 1.77045373, 2.], + [2., 0., 2.55760419, 2.], + [6., 8., 9.10208346, 4.], + [7., 9., 24.7784379, 6.]]) + +inconsistent_ytdist = { + 1: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [255., 0., 1., 0.], + [268., 0., 1., 0.], + [295., 0., 1., 0.]]), + 2: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [237., 25.45584412, 2., 0.70710678], + [261.5, 9.19238816, 2., 0.70710678], + [233.66666667, 83.9424406, 3., 0.7306594]]), + 3: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [237., 25.45584412, 2., 0.70710678], + [247.33333333, 25.38372182, 3., 0.81417007], + [239., 69.36377537, 4., 0.80733783]]), + 4: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [237., 25.45584412, 2., 0.70710678], + [247.33333333, 25.38372182, 3., 0.81417007], + [235., 60.73302232, 5., 0.98793042]])} + +fcluster_inconsistent = { + 0.8: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 1.0: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1])} + +fcluster_distance = { + 0.6: array([4, 4, 4, 4, 4, 4, 4, 5, 4, 4, 6, 6, 6, 6, 6, 7, 6, 6, 6, 6, 3, + 1, 1, 1, 2, 1, 1, 1, 1, 1]), + 1.0: array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1])} + +fcluster_maxclust = { + 8.0: array([5, 5, 5, 5, 5, 5, 5, 6, 5, 5, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 4, + 1, 1, 1, 3, 1, 1, 1, 1, 2]), + 4.0: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 1.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1])} diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py 
b/venv/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py new file mode 100644 index 0000000000000000000000000000000000000000..c474bee6f4b943a1dd9047374f5791a728ccd8be --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py @@ -0,0 +1,1225 @@ +# +# Author: Damian Eads +# Date: April 17, 2008 +# +# Copyright (C) 2008 Damian Eads +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import numpy as np +from numpy.testing import assert_allclose, assert_equal, assert_, assert_warns +import pytest +from pytest import raises as assert_raises + +import scipy.cluster.hierarchy +from scipy.cluster.hierarchy import ( + ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage, + num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster, + is_isomorphic, single, leaders, + correspond, is_monotonic, maxdists, maxinconsts, maxRstat, + is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram, + set_link_color_palette, cut_tree, optimal_leaf_ordering, + _order_cluster_tree, _hierarchy, _LINKAGE_METHODS) +from scipy.spatial.distance import pdist +from scipy.cluster._hierarchy import Heap +from scipy.conftest import array_api_compatible +from scipy._lib._array_api import xp_assert_close, xp_assert_equal + +from . import hierarchy_test_data + + +# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so +# check if it's available +try: + import matplotlib + # and set the backend to be Agg (no gui) + matplotlib.use('Agg') + # before importing pyplot + import matplotlib.pyplot as plt + have_matplotlib = True +except Exception: + have_matplotlib = False + + +pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_if_array_api")] +skip_if_array_api = pytest.mark.skip_if_array_api + + +class TestLinkage: + + @skip_if_array_api(cpu_only=True) + def test_linkage_non_finite_elements_in_distance_matrix(self, xp): + # Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf). + # Exception expected. 
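+        # A condensed distance vector of length 6 covers 6 = 4*(4-1)/2
+        # pairwise distances over 4 observations; planting a NaN in it
+        # should make linkage() raise ValueError.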
+ y = xp.zeros((6,)) + y[0] = xp.nan + assert_raises(ValueError, linkage, y) + + @skip_if_array_api(cpu_only=True) + def test_linkage_empty_distance_matrix(self, xp): + # Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected. + y = xp.zeros((0,)) + assert_raises(ValueError, linkage, y) + + @skip_if_array_api(cpu_only=True) + def test_linkage_tdist(self, xp): + for method in ['single', 'complete', 'average', 'weighted']: + self.check_linkage_tdist(method, xp) + + def check_linkage_tdist(self, method, xp): + # Tests linkage(Y, method) on the tdist data set. + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), method) + expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method) + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10) + + @skip_if_array_api(cpu_only=True) + def test_linkage_X(self, xp): + for method in ['centroid', 'median', 'ward']: + self.check_linkage_q(method, xp) + + def check_linkage_q(self, method, xp): + # Tests linkage(Y, method) on the Q data set. + Z = linkage(xp.asarray(hierarchy_test_data.X), method) + expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method) + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06) + + y = scipy.spatial.distance.pdist(hierarchy_test_data.X, + metric="euclidean") + Z = linkage(xp.asarray(y), method) + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06) + + @skip_if_array_api(cpu_only=True) + def test_compare_with_trivial(self, xp): + rng = np.random.RandomState(0) + n = 20 + X = rng.rand(n, 2) + d = pdist(X) + + for method, code in _LINKAGE_METHODS.items(): + Z_trivial = _hierarchy.linkage(d, n, code) + Z = linkage(xp.asarray(d), method) + xp_assert_close(Z, xp.asarray(Z_trivial), rtol=1e-14, atol=1e-15) + + @skip_if_array_api(cpu_only=True) + def test_optimal_leaf_ordering(self, xp): + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), optimal_ordering=True) + expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo') + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10) + + +@skip_if_array_api(cpu_only=True) +class TestLinkageTies: + + _expectations = { + 'single': np.array([[0, 1, 1.41421356, 2], + [2, 3, 1.41421356, 3]]), + 'complete': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.82842712, 3]]), + 'average': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'weighted': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'centroid': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'median': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'ward': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.44948974, 3]]), + } + + def test_linkage_ties(self, xp): + for method in ['single', 'complete', 'average', 'weighted', + 'centroid', 'median', 'ward']: + self.check_linkage_ties(method, xp) + + def check_linkage_ties(self, method, xp): + X = xp.asarray([[-1, -1], [0, 0], [1, 1]]) + Z = linkage(X, method=method) + expectedZ = self._expectations[method] + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06) + + +@skip_if_array_api(cpu_only=True) +class TestInconsistent: + + def test_inconsistent_tdist(self, xp): + for depth in hierarchy_test_data.inconsistent_ytdist: + self.check_inconsistent_tdist(depth, xp) + + def check_inconsistent_tdist(self, depth, xp): + Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single) + xp_assert_close(inconsistent(Z, depth), + xp.asarray(hierarchy_test_data.inconsistent_ytdist[depth])) + + +@skip_if_array_api(cpu_only=True) +class TestCopheneticDistance: + + def test_linkage_cophenet_tdist_Z(self, xp): + # Tests 
cophenet(Z) on tdist data set. + expectedM = xp.asarray([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, + 295, 138, 219, 295, 295]) + Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single) + M = cophenet(Z) + xp_assert_close(M, xp.asarray(expectedM, dtype=xp.float64), atol=1e-10) + + def test_linkage_cophenet_tdist_Z_Y(self, xp): + # Tests cophenet(Z, Y) on tdist data set. + Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single) + (c, M) = cophenet(Z, xp.asarray(hierarchy_test_data.ytdist)) + expectedM = xp.asarray([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, + 295, 138, 219, 295, 295], dtype=xp.float64) + expectedc = xp.asarray(0.639931296433393415057366837573, dtype=xp.float64)[()] + xp_assert_close(c, expectedc, atol=1e-10) + xp_assert_close(M, expectedM, atol=1e-10) + + +class TestMLabLinkageConversion: + + def test_mlab_linkage_conversion_empty(self, xp): + # Tests from/to_mlab_linkage on empty linkage array. + X = xp.asarray([], dtype=xp.float64) + xp_assert_equal(from_mlab_linkage(X), X) + xp_assert_equal(to_mlab_linkage(X), X) + + @skip_if_array_api(cpu_only=True) + def test_mlab_linkage_conversion_single_row(self, xp): + # Tests from/to_mlab_linkage on linkage array with single row. + Z = xp.asarray([[0., 1., 3., 2.]]) + Zm = xp.asarray([[1, 2, 3]]) + xp_assert_close(from_mlab_linkage(Zm), xp.asarray(Z, dtype=xp.float64), + rtol=1e-15) + xp_assert_close(to_mlab_linkage(Z), xp.asarray(Zm, dtype=xp.float64), + rtol=1e-15) + + @skip_if_array_api(cpu_only=True) + def test_mlab_linkage_conversion_multiple_rows(self, xp): + # Tests from/to_mlab_linkage on linkage array with multiple rows. + Zm = xp.asarray([[3, 6, 138], [4, 5, 219], + [1, 8, 255], [2, 9, 268], [7, 10, 295]]) + Z = xp.asarray([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 255., 3.], + [1., 8., 268., 4.], + [6., 9., 295., 6.]], + dtype=xp.float64) + xp_assert_close(from_mlab_linkage(Zm), Z, rtol=1e-15) + xp_assert_close(to_mlab_linkage(Z), xp.asarray(Zm, dtype=xp.float64), + rtol=1e-15) + + +@skip_if_array_api(cpu_only=True) +class TestFcluster: + + def test_fclusterdata(self, xp): + for t in hierarchy_test_data.fcluster_inconsistent: + self.check_fclusterdata(t, 'inconsistent', xp) + for t in hierarchy_test_data.fcluster_distance: + self.check_fclusterdata(t, 'distance', xp) + for t in hierarchy_test_data.fcluster_maxclust: + self.check_fclusterdata(t, 'maxclust', xp) + + def check_fclusterdata(self, t, criterion, xp): + # Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set + expectedT = xp.asarray(getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]) + X = xp.asarray(hierarchy_test_data.Q_X) + T = fclusterdata(X, criterion=criterion, t=t) + assert_(is_isomorphic(T, expectedT)) + + def test_fcluster(self, xp): + for t in hierarchy_test_data.fcluster_inconsistent: + self.check_fcluster(t, 'inconsistent', xp) + for t in hierarchy_test_data.fcluster_distance: + self.check_fcluster(t, 'distance', xp) + for t in hierarchy_test_data.fcluster_maxclust: + self.check_fcluster(t, 'maxclust', xp) + + def check_fcluster(self, t, criterion, xp): + # Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set. 
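+        # fcluster assigns flat cluster ids in an implementation-defined
+        # order, so the result is compared against the expected partition
+        # with is_isomorphic rather than element-wise equality.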
+ expectedT = xp.asarray(getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]) + Z = single(xp.asarray(hierarchy_test_data.Q_X)) + T = fcluster(Z, criterion=criterion, t=t) + assert_(is_isomorphic(T, expectedT)) + + def test_fcluster_monocrit(self, xp): + for t in hierarchy_test_data.fcluster_distance: + self.check_fcluster_monocrit(t, xp) + for t in hierarchy_test_data.fcluster_maxclust: + self.check_fcluster_maxclust_monocrit(t, xp) + + def check_fcluster_monocrit(self, t, xp): + expectedT = xp.asarray(hierarchy_test_data.fcluster_distance[t]) + Z = single(xp.asarray(hierarchy_test_data.Q_X)) + T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z)) + assert_(is_isomorphic(T, expectedT)) + + def check_fcluster_maxclust_monocrit(self, t, xp): + expectedT = xp.asarray(hierarchy_test_data.fcluster_maxclust[t]) + Z = single(xp.asarray(hierarchy_test_data.Q_X)) + T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z)) + assert_(is_isomorphic(T, expectedT)) + + +@skip_if_array_api(cpu_only=True) +class TestLeaders: + + def test_leaders_single(self, xp): + # Tests leaders using a flat clustering generated by single linkage. + X = hierarchy_test_data.Q_X + Y = pdist(X) + Y = xp.asarray(Y) + Z = linkage(Y) + T = fcluster(Z, criterion='maxclust', t=3) + Lright = (xp.asarray([53, 55, 56]), xp.asarray([2, 3, 1])) + T = xp.asarray(T, dtype=xp.int32) + L = leaders(Z, T) + assert_allclose(np.concatenate(L), np.concatenate(Lright), rtol=1e-15) + + +@skip_if_array_api(np_only=True, + reasons=['`is_isomorphic` only supports NumPy backend']) +class TestIsIsomorphic: + + @skip_if_array_api(np_only=True, + reasons=['array-likes only supported for NumPy backend']) + def test_array_like(self, xp): + assert is_isomorphic([1, 1, 1], [2, 2, 2]) + assert is_isomorphic([], []) + + def test_is_isomorphic_1(self, xp): + # Tests is_isomorphic on test case #1 (one flat cluster, different labellings) + a = xp.asarray([1, 1, 1]) + b = xp.asarray([2, 2, 2]) + assert is_isomorphic(a, b) + assert is_isomorphic(b, a) + + def test_is_isomorphic_2(self, xp): + # Tests is_isomorphic on test case #2 (two flat clusters, different labelings) + a = xp.asarray([1, 7, 1]) + b = xp.asarray([2, 3, 2]) + assert is_isomorphic(a, b) + assert is_isomorphic(b, a) + + def test_is_isomorphic_3(self, xp): + # Tests is_isomorphic on test case #3 (no flat clusters) + a = xp.asarray([]) + b = xp.asarray([]) + assert is_isomorphic(a, b) + + def test_is_isomorphic_4A(self, xp): + # Tests is_isomorphic on test case #4A + # (3 flat clusters, different labelings, isomorphic) + a = xp.asarray([1, 2, 3]) + b = xp.asarray([1, 3, 2]) + assert is_isomorphic(a, b) + assert is_isomorphic(b, a) + + def test_is_isomorphic_4B(self, xp): + # Tests is_isomorphic on test case #4B + # (3 flat clusters, different labelings, nonisomorphic) + a = xp.asarray([1, 2, 3, 3]) + b = xp.asarray([1, 3, 2, 3]) + assert is_isomorphic(a, b) is False + assert is_isomorphic(b, a) is False + + def test_is_isomorphic_4C(self, xp): + # Tests is_isomorphic on test case #4C + # (3 flat clusters, different labelings, isomorphic) + a = xp.asarray([7, 2, 3]) + b = xp.asarray([6, 3, 2]) + assert is_isomorphic(a, b) + assert is_isomorphic(b, a) + + def test_is_isomorphic_5(self, xp): + # Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random + # clusters, random permutation of the labeling). 
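+        # The helper below relabels a random assignment through a random
+        # permutation P (b = P[a]), which changes every label but preserves
+        # the partition, so the two assignments must test as isomorphic.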
+ for nc in [2, 3, 5]: + self.help_is_isomorphic_randperm(1000, nc, xp=xp) + + def test_is_isomorphic_6(self, xp): + # Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random + # clusters, random permutation of the labeling, slightly + # nonisomorphic.) + for nc in [2, 3, 5]: + self.help_is_isomorphic_randperm(1000, nc, True, 5, xp=xp) + + def test_is_isomorphic_7(self, xp): + # Regression test for gh-6271 + a = xp.asarray([1, 2, 3]) + b = xp.asarray([1, 1, 1]) + assert not is_isomorphic(a, b) + + def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0, + *, xp): + for k in range(3): + a = (np.random.rand(nobs) * nclusters).astype(int) + b = np.zeros(a.size, dtype=int) + P = np.random.permutation(nclusters) + for i in range(0, a.shape[0]): + b[i] = P[a[i]] + if noniso: + Q = np.random.permutation(nobs) + b[Q[0:nerrors]] += 1 + b[Q[0:nerrors]] %= nclusters + a = xp.asarray(a) + b = xp.asarray(b) + assert is_isomorphic(a, b) == (not noniso) + assert is_isomorphic(b, a) == (not noniso) + + +@skip_if_array_api(cpu_only=True) +class TestIsValidLinkage: + + def test_is_valid_linkage_various_size(self, xp): + for nrow, ncol, valid in [(2, 5, False), (2, 3, False), + (1, 4, True), (2, 4, True)]: + self.check_is_valid_linkage_various_size(nrow, ncol, valid, xp) + + def check_is_valid_linkage_various_size(self, nrow, ncol, valid, xp): + # Tests is_valid_linkage(Z) with linkage matrices of various sizes + Z = xp.asarray([[0, 1, 3.0, 2, 5], + [3, 2, 4.0, 3, 3]], dtype=xp.float64) + Z = Z[:nrow, :ncol] + assert_(is_valid_linkage(Z) == valid) + if not valid: + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_int_type(self, xp): + # Tests is_valid_linkage(Z) with integer type. + Z = xp.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=xp.int64) + assert_(is_valid_linkage(Z) is False) + assert_raises(TypeError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_empty(self, xp): + # Tests is_valid_linkage(Z) with empty linkage. + Z = xp.zeros((0, 4), dtype=xp.float64) + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + assert_(is_valid_linkage(Z) is True) + + def test_is_valid_linkage_4_and_up_neg_index_left(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative indices (left). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + Z[i//2,0] = -2 + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up_neg_index_right(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative indices (right). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + Z[i//2,1] = -2 + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up_neg_dist(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative distances. 
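+        # i*(i-1)//2 is the length of a condensed distance matrix over i
+        # observations; corrupting one merge height with a negative value
+        # must be rejected, since linkage distances are non-negative.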
+ for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + Z[i//2,2] = -0.5 + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up_neg_counts(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative counts. + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + Z[i//2,3] = -2 + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + +@skip_if_array_api(cpu_only=True) +class TestIsValidInconsistent: + + def test_is_valid_im_int_type(self, xp): + # Tests is_valid_im(R) with integer type. + R = xp.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=xp.int64) + assert_(is_valid_im(R) is False) + assert_raises(TypeError, is_valid_im, R, throw=True) + + def test_is_valid_im_various_size(self, xp): + for nrow, ncol, valid in [(2, 5, False), (2, 3, False), + (1, 4, True), (2, 4, True)]: + self.check_is_valid_im_various_size(nrow, ncol, valid, xp) + + def check_is_valid_im_various_size(self, nrow, ncol, valid, xp): + # Tests is_valid_im(R) with linkage matrices of various sizes + R = xp.asarray([[0, 1, 3.0, 2, 5], + [3, 2, 4.0, 3, 3]], dtype=xp.float64) + R = R[:nrow, :ncol] + assert_(is_valid_im(R) == valid) + if not valid: + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_empty(self, xp): + # Tests is_valid_im(R) with empty inconsistency matrix. + R = xp.zeros((0, 4), dtype=xp.float64) + assert_(is_valid_im(R) is False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_4_and_up(self, xp): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + R = inconsistent(Z) + assert_(is_valid_im(R) is True) + + def test_is_valid_im_4_and_up_neg_index_left(self, xp): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3) with negative link height means. + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + R = inconsistent(Z) + R[i//2,0] = -2.0 + assert_(is_valid_im(R) is False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_4_and_up_neg_index_right(self, xp): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3) with negative link height standard deviations. + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + R = inconsistent(Z) + R[i//2,1] = -2.0 + assert_(is_valid_im(R) is False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_4_and_up_neg_dist(self, xp): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3) with negative link counts. + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + R = inconsistent(Z) + R[i//2,2] = -0.5 + assert_(is_valid_im(R) is False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + +class TestNumObsLinkage: + + @skip_if_array_api(cpu_only=True) + def test_num_obs_linkage_empty(self, xp): + # Tests num_obs_linkage(Z) with empty linkage. 
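+        # A linkage over n observations has exactly n - 1 rows, so the
+        # observation count is inferred as Z.shape[0] + 1; a 0-row matrix
+        # cannot describe a valid clustering and should raise.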
+ Z = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, num_obs_linkage, Z) + + def test_num_obs_linkage_1x4(self, xp): + # Tests num_obs_linkage(Z) on linkage over 2 observations. + Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64) + assert_equal(num_obs_linkage(Z), 2) + + def test_num_obs_linkage_2x4(self, xp): + # Tests num_obs_linkage(Z) on linkage over 3 observations. + Z = xp.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=xp.float64) + assert_equal(num_obs_linkage(Z), 3) + + @skip_if_array_api(cpu_only=True) + def test_num_obs_linkage_4_and_up(self, xp): + # Tests num_obs_linkage(Z) on linkage on observation sets between sizes + # 4 and 15 (step size 3). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + assert_equal(num_obs_linkage(Z), i) + + +@skip_if_array_api(cpu_only=True) +class TestLeavesList: + + def test_leaves_list_1x4(self, xp): + # Tests leaves_list(Z) on a 1x4 linkage. + Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64) + to_tree(Z) + assert_allclose(leaves_list(Z), [0, 1], rtol=1e-15) + + def test_leaves_list_2x4(self, xp): + # Tests leaves_list(Z) on a 2x4 linkage. + Z = xp.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=xp.float64) + to_tree(Z) + assert_allclose(leaves_list(Z), [0, 1, 2], rtol=1e-15) + + def test_leaves_list_Q(self, xp): + for method in ['single', 'complete', 'average', 'weighted', 'centroid', + 'median', 'ward']: + self.check_leaves_list_Q(method, xp) + + def check_leaves_list_Q(self, method, xp): + # Tests leaves_list(Z) on the Q data set + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, method) + node = to_tree(Z) + assert_allclose(node.pre_order(), leaves_list(Z), rtol=1e-15) + + def test_Q_subtree_pre_order(self, xp): + # Tests that pre_order() works when called on sub-trees. + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, 'single') + node = to_tree(Z) + assert_allclose(node.pre_order(), (node.get_left().pre_order() + + node.get_right().pre_order()), + rtol=1e-15) + + +@skip_if_array_api(cpu_only=True) +class TestCorrespond: + + def test_correspond_empty(self, xp): + # Tests correspond(Z, y) with empty linkage and condensed distance matrix. + y = xp.zeros((0,), dtype=xp.float64) + Z = xp.zeros((0,4), dtype=xp.float64) + assert_raises(ValueError, correspond, Z, y) + + def test_correspond_2_and_up(self, xp): + # Tests correspond(Z, y) on linkage and CDMs over observation sets of + # different sizes. + for i in range(2, 4): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + assert_(correspond(Z, y)) + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + assert_(correspond(Z, y)) + + def test_correspond_4_and_up(self, xp): + # Tests correspond(Z, y) on linkage and CDMs over observation sets of + # different sizes. Correspondence should be false. + for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) + + list(zip(list(range(3, 5)), list(range(2, 4))))): + y = np.random.rand(i*(i-1)//2) + y2 = np.random.rand(j*(j-1)//2) + y = xp.asarray(y) + y2 = xp.asarray(y2) + Z = linkage(y) + Z2 = linkage(y2) + assert not correspond(Z, y2) + assert not correspond(Z2, y) + + def test_correspond_4_and_up_2(self, xp): + # Tests correspond(Z, y) on linkage and CDMs over observation sets of + # different sizes. Correspondence should be false. 
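+        # Pair each linkage built over i observations with a condensed
+        # distance matrix for j != i observations (and vice versa);
+        # correspond should report the size mismatch in both directions.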
+ for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) + + list(zip(list(range(2, 7)), list(range(16, 21))))): + y = np.random.rand(i*(i-1)//2) + y2 = np.random.rand(j*(j-1)//2) + y = xp.asarray(y) + y2 = xp.asarray(y2) + Z = linkage(y) + Z2 = linkage(y2) + assert not correspond(Z, y2) + assert not correspond(Z2, y) + + def test_num_obs_linkage_multi_matrix(self, xp): + # Tests num_obs_linkage with observation matrices of multiple sizes. + for n in range(2, 10): + X = np.random.rand(n, 4) + Y = pdist(X) + Y = xp.asarray(Y) + Z = linkage(Y) + assert_equal(num_obs_linkage(Z), n) + + +@skip_if_array_api(cpu_only=True) +class TestIsMonotonic: + + def test_is_monotonic_empty(self, xp): + # Tests is_monotonic(Z) on an empty linkage. + Z = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, is_monotonic, Z) + + def test_is_monotonic_1x4(self, xp): + # Tests is_monotonic(Z) on 1x4 linkage. Expecting True. + Z = xp.asarray([[0, 1, 0.3, 2]], dtype=xp.float64) + assert is_monotonic(Z) + + def test_is_monotonic_2x4_T(self, xp): + # Tests is_monotonic(Z) on 2x4 linkage. Expecting True. + Z = xp.asarray([[0, 1, 0.3, 2], + [2, 3, 0.4, 3]], dtype=xp.float64) + assert is_monotonic(Z) + + def test_is_monotonic_2x4_F(self, xp): + # Tests is_monotonic(Z) on 2x4 linkage. Expecting False. + Z = xp.asarray([[0, 1, 0.4, 2], + [2, 3, 0.3, 3]], dtype=xp.float64) + assert not is_monotonic(Z) + + def test_is_monotonic_3x4_T(self, xp): + # Tests is_monotonic(Z) on 3x4 linkage. Expecting True. + Z = xp.asarray([[0, 1, 0.3, 2], + [2, 3, 0.4, 2], + [4, 5, 0.6, 4]], dtype=xp.float64) + assert is_monotonic(Z) + + def test_is_monotonic_3x4_F1(self, xp): + # Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False. + Z = xp.asarray([[0, 1, 0.3, 2], + [2, 3, 0.2, 2], + [4, 5, 0.6, 4]], dtype=xp.float64) + assert not is_monotonic(Z) + + def test_is_monotonic_3x4_F2(self, xp): + # Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False. + Z = xp.asarray([[0, 1, 0.8, 2], + [2, 3, 0.4, 2], + [4, 5, 0.6, 4]], dtype=xp.float64) + assert not is_monotonic(Z) + + def test_is_monotonic_3x4_F3(self, xp): + # Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False + Z = xp.asarray([[0, 1, 0.3, 2], + [2, 3, 0.4, 2], + [4, 5, 0.2, 4]], dtype=xp.float64) + assert not is_monotonic(Z) + + def test_is_monotonic_tdist_linkage1(self, xp): + # Tests is_monotonic(Z) on clustering generated by single linkage on + # tdist data set. Expecting True. + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + assert is_monotonic(Z) + + def test_is_monotonic_tdist_linkage2(self, xp): + # Tests is_monotonic(Z) on clustering generated by single linkage on + # tdist data set. Perturbing. Expecting False. + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + Z[2,2] = 0.0 + assert not is_monotonic(Z) + + def test_is_monotonic_Q_linkage(self, xp): + # Tests is_monotonic(Z) on clustering generated by single linkage on + # Q data set. Expecting True. + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, 'single') + assert is_monotonic(Z) + + +@skip_if_array_api(cpu_only=True) +class TestMaxDists: + + def test_maxdists_empty_linkage(self, xp): + # Tests maxdists(Z) on empty linkage. Expecting exception. + Z = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, maxdists, Z) + + def test_maxdists_one_cluster_linkage(self, xp): + # Tests maxdists(Z) on linkage with one cluster. 
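+        # With a single merge there is only one candidate distance, Z[0, 2],
+        # so maxdists must agree with the brute-force reference
+        # calculate_maximum_distances defined at the bottom of this module.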
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + MD = maxdists(Z) + expectedMD = calculate_maximum_distances(Z, xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + def test_maxdists_Q_linkage(self, xp): + for method in ['single', 'complete', 'ward', 'centroid', 'median']: + self.check_maxdists_Q_linkage(method, xp) + + def check_maxdists_Q_linkage(self, method, xp): + # Tests maxdists(Z) on the Q data set + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, method) + MD = maxdists(Z) + expectedMD = calculate_maximum_distances(Z, xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + +class TestMaxInconsts: + + @skip_if_array_api(cpu_only=True) + def test_maxinconsts_empty_linkage(self, xp): + # Tests maxinconsts(Z, R) on empty linkage. Expecting exception. + Z = xp.zeros((0, 4), dtype=xp.float64) + R = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, maxinconsts, Z, R) + + def test_maxinconsts_difrow_linkage(self, xp): + # Tests maxinconsts(Z, R) on linkage and inconsistency matrices with + # different numbers of clusters. Expecting exception. + Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + R = np.random.rand(2, 4) + R = xp.asarray(R) + assert_raises(ValueError, maxinconsts, Z, R) + + @skip_if_array_api(cpu_only=True) + def test_maxinconsts_one_cluster_linkage(self, xp): + # Tests maxinconsts(Z, R) on linkage with one cluster. + Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64) + MD = maxinconsts(Z, R) + expectedMD = calculate_maximum_inconsistencies(Z, R, xp=xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + @skip_if_array_api(cpu_only=True) + def test_maxinconsts_Q_linkage(self, xp): + for method in ['single', 'complete', 'ward', 'centroid', 'median']: + self.check_maxinconsts_Q_linkage(method, xp) + + def check_maxinconsts_Q_linkage(self, method, xp): + # Tests maxinconsts(Z, R) on the Q data set + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, method) + R = inconsistent(Z) + MD = maxinconsts(Z, R) + expectedMD = calculate_maximum_inconsistencies(Z, R, xp=xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + +class TestMaxRStat: + + def test_maxRstat_invalid_index(self, xp): + for i in [3.3, -1, 4]: + self.check_maxRstat_invalid_index(i, xp) + + def check_maxRstat_invalid_index(self, i, xp): + # Tests maxRstat(Z, R, i). Expecting exception. + Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64) + if isinstance(i, int): + assert_raises(ValueError, maxRstat, Z, R, i) + else: + assert_raises(TypeError, maxRstat, Z, R, i) + + @skip_if_array_api(cpu_only=True) + def test_maxRstat_empty_linkage(self, xp): + for i in range(4): + self.check_maxRstat_empty_linkage(i, xp) + + def check_maxRstat_empty_linkage(self, i, xp): + # Tests maxRstat(Z, R, i) on empty linkage. Expecting exception. + Z = xp.zeros((0, 4), dtype=xp.float64) + R = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, maxRstat, Z, R, i) + + def test_maxRstat_difrow_linkage(self, xp): + for i in range(4): + self.check_maxRstat_difrow_linkage(i, xp) + + def check_maxRstat_difrow_linkage(self, i, xp): + # Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with + # different numbers of clusters. Expecting exception. 
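+        # R is given 2 rows while Z has only 1; maxRstat requires
+        # Z.shape[0] == R.shape[0] and should raise ValueError here.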
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + R = np.random.rand(2, 4) + R = xp.asarray(R) + assert_raises(ValueError, maxRstat, Z, R, i) + + @skip_if_array_api(cpu_only=True) + def test_maxRstat_one_cluster_linkage(self, xp): + for i in range(4): + self.check_maxRstat_one_cluster_linkage(i, xp) + + def check_maxRstat_one_cluster_linkage(self, i, xp): + # Tests maxRstat(Z, R, i) on linkage with one cluster. + Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64) + MD = maxRstat(Z, R, 1) + expectedMD = calculate_maximum_inconsistencies(Z, R, 1, xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + @skip_if_array_api(cpu_only=True) + def test_maxRstat_Q_linkage(self, xp): + for method in ['single', 'complete', 'ward', 'centroid', 'median']: + for i in range(4): + self.check_maxRstat_Q_linkage(method, i, xp) + + def check_maxRstat_Q_linkage(self, method, i, xp): + # Tests maxRstat(Z, R, i) on the Q data set + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, method) + R = inconsistent(Z) + MD = maxRstat(Z, R, 1) + expectedMD = calculate_maximum_inconsistencies(Z, R, 1, xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + +@skip_if_array_api(cpu_only=True) +class TestDendrogram: + + def test_dendrogram_single_linkage_tdist(self, xp): + # Tests dendrogram calculation on single linkage of the tdist data set. + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + R = dendrogram(Z, no_plot=True) + leaves = R["leaves"] + assert_equal(leaves, [2, 5, 1, 0, 3, 4]) + + def test_valid_orientation(self, xp): + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + assert_raises(ValueError, dendrogram, Z, orientation="foo") + + def test_labels_as_array_or_list(self, xp): + # test for gh-12418 + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + labels = xp.asarray([1, 3, 2, 6, 4, 5]) + result1 = dendrogram(Z, labels=labels, no_plot=True) + result2 = dendrogram(Z, labels=list(labels), no_plot=True) + assert result1 == result2 + + @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") + def test_valid_label_size(self, xp): + link = xp.asarray([ + [0, 1, 1.0, 4], + [2, 3, 1.0, 5], + [4, 5, 2.0, 6], + ]) + plt.figure() + with pytest.raises(ValueError) as exc_info: + dendrogram(link, labels=list(range(100))) + assert "Dimensions of Z and labels must be consistent."\ + in str(exc_info.value) + + with pytest.raises( + ValueError, + match="Dimensions of Z and labels must be consistent."): + dendrogram(link, labels=[]) + + plt.close() + + @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") + def test_dendrogram_plot(self, xp): + for orientation in ['top', 'bottom', 'left', 'right']: + self.check_dendrogram_plot(orientation, xp) + + def check_dendrogram_plot(self, orientation, xp): + # Tests dendrogram plotting. 
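+        # In the expected output, each icoord/dcoord entry holds the four
+        # (x, y) corner coordinates of one inverted-U link; leaves are laid
+        # out 10 units apart with centers at x = 5, 15, 25, ..., matching
+        # the geometry used by _dendrogram_calculate_info.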
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + expected = {'color_list': ['C1', 'C0', 'C0', 'C0', 'C0'], + 'dcoord': [[0.0, 138.0, 138.0, 0.0], + [0.0, 219.0, 219.0, 0.0], + [0.0, 255.0, 255.0, 219.0], + [0.0, 268.0, 268.0, 255.0], + [138.0, 295.0, 295.0, 268.0]], + 'icoord': [[5.0, 5.0, 15.0, 15.0], + [45.0, 45.0, 55.0, 55.0], + [35.0, 35.0, 50.0, 50.0], + [25.0, 25.0, 42.5, 42.5], + [10.0, 10.0, 33.75, 33.75]], + 'ivl': ['2', '5', '1', '0', '3', '4'], + 'leaves': [2, 5, 1, 0, 3, 4], + 'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0', 'C0'], + } + + fig = plt.figure() + ax = fig.add_subplot(221) + + # test that dendrogram accepts ax keyword + R1 = dendrogram(Z, ax=ax, orientation=orientation) + R1['dcoord'] = np.asarray(R1['dcoord']) + assert_equal(R1, expected) + + # test that dendrogram accepts and handle the leaf_font_size and + # leaf_rotation keywords + dendrogram(Z, ax=ax, orientation=orientation, + leaf_font_size=20, leaf_rotation=90) + testlabel = ( + ax.get_xticklabels()[0] + if orientation in ['top', 'bottom'] + else ax.get_yticklabels()[0] + ) + assert_equal(testlabel.get_rotation(), 90) + assert_equal(testlabel.get_size(), 20) + dendrogram(Z, ax=ax, orientation=orientation, + leaf_rotation=90) + testlabel = ( + ax.get_xticklabels()[0] + if orientation in ['top', 'bottom'] + else ax.get_yticklabels()[0] + ) + assert_equal(testlabel.get_rotation(), 90) + dendrogram(Z, ax=ax, orientation=orientation, + leaf_font_size=20) + testlabel = ( + ax.get_xticklabels()[0] + if orientation in ['top', 'bottom'] + else ax.get_yticklabels()[0] + ) + assert_equal(testlabel.get_size(), 20) + plt.close() + + # test plotting to gca (will import pylab) + R2 = dendrogram(Z, orientation=orientation) + plt.close() + R2['dcoord'] = np.asarray(R2['dcoord']) + assert_equal(R2, expected) + + @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") + def test_dendrogram_truncate_mode(self, xp): + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + + R = dendrogram(Z, 2, 'lastp', show_contracted=True) + plt.close() + R['dcoord'] = np.asarray(R['dcoord']) + assert_equal(R, {'color_list': ['C0'], + 'dcoord': [[0.0, 295.0, 295.0, 0.0]], + 'icoord': [[5.0, 5.0, 15.0, 15.0]], + 'ivl': ['(2)', '(4)'], + 'leaves': [6, 9], + 'leaves_color_list': ['C0', 'C0'], + }) + + R = dendrogram(Z, 2, 'mtica', show_contracted=True) + plt.close() + R['dcoord'] = np.asarray(R['dcoord']) + assert_equal(R, {'color_list': ['C1', 'C0', 'C0', 'C0'], + 'dcoord': [[0.0, 138.0, 138.0, 0.0], + [0.0, 255.0, 255.0, 0.0], + [0.0, 268.0, 268.0, 255.0], + [138.0, 295.0, 295.0, 268.0]], + 'icoord': [[5.0, 5.0, 15.0, 15.0], + [35.0, 35.0, 45.0, 45.0], + [25.0, 25.0, 40.0, 40.0], + [10.0, 10.0, 32.5, 32.5]], + 'ivl': ['2', '5', '1', '0', '(2)'], + 'leaves': [2, 5, 1, 0, 7], + 'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0'], + }) + + def test_dendrogram_colors(self, xp): + # Tests dendrogram plots with alternate colors + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + + set_link_color_palette(['c', 'm', 'y', 'k']) + R = dendrogram(Z, no_plot=True, + above_threshold_color='g', color_threshold=250) + set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k']) + + color_list = R['color_list'] + assert_equal(color_list, ['c', 'm', 'g', 'g', 'g']) + + # reset color palette (global list) + set_link_color_palette(None) + + def test_dendrogram_leaf_colors_zero_dist(self, xp): + # tests that the colors of leafs are correct for tree + # with two identical points + x = xp.asarray([[1, 0, 0], + [0, 0, 
1], + [0, 2, 0], + [0, 0, 1], + [0, 1, 0], + [0, 1, 0]]) + z = linkage(x, "single") + d = dendrogram(z, no_plot=True) + exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2'] + colors = d["leaves_color_list"] + assert_equal(colors, exp_colors) + + def test_dendrogram_leaf_colors(self, xp): + # tests that the colors are correct for a tree + # with two near points ((0, 0, 1.1) and (0, 0, 1)) + x = xp.asarray([[1, 0, 0], + [0, 0, 1.1], + [0, 2, 0], + [0, 0, 1], + [0, 1, 0], + [0, 1, 0]]) + z = linkage(x, "single") + d = dendrogram(z, no_plot=True) + exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2'] + colors = d["leaves_color_list"] + assert_equal(colors, exp_colors) + + +def calculate_maximum_distances(Z, xp): + # Used for testing correctness of maxdists. + n = Z.shape[0] + 1 + B = xp.zeros((n-1,), dtype=Z.dtype) + q = xp.zeros((3,)) + for i in range(0, n - 1): + q[:] = 0.0 + left = Z[i, 0] + right = Z[i, 1] + if left >= n: + q[0] = B[xp.asarray(left, dtype=xp.int64) - n] + if right >= n: + q[1] = B[xp.asarray(right, dtype=xp.int64) - n] + q[2] = Z[i, 2] + B[i] = xp.max(q) + return B + + +def calculate_maximum_inconsistencies(Z, R, k=3, xp=np): + # Used for testing correctness of maxinconsts. + n = Z.shape[0] + 1 + dtype = xp.result_type(Z, R) + B = xp.zeros((n-1,), dtype=dtype) + q = xp.zeros((3,)) + for i in range(0, n - 1): + q[:] = 0.0 + left = Z[i, 0] + right = Z[i, 1] + if left >= n: + q[0] = B[xp.asarray(left, dtype=xp.int64) - n] + if right >= n: + q[1] = B[xp.asarray(right, dtype=xp.int64) - n] + q[2] = R[i, k] + B[i] = xp.max(q) + return B + + +@skip_if_array_api(cpu_only=True) +def test_unsupported_uncondensed_distance_matrix_linkage_warning(xp): + assert_warns(ClusterWarning, linkage, xp.asarray([[0, 1], [1, 0]])) + + +def test_euclidean_linkage_value_error(xp): + for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS: + assert_raises(ValueError, linkage, xp.asarray([[1, 1], [1, 1]]), + method=method, metric='cityblock') + + +@skip_if_array_api(cpu_only=True) +def test_2x2_linkage(xp): + Z1 = linkage(xp.asarray([1]), method='single', metric='euclidean') + Z2 = linkage(xp.asarray([[0, 1], [0, 0]]), method='single', metric='euclidean') + xp_assert_close(Z1, Z2, rtol=1e-15) + + +@skip_if_array_api(cpu_only=True) +def test_node_compare(xp): + np.random.seed(23) + nobs = 50 + X = np.random.randn(nobs, 4) + X = xp.asarray(X) + Z = scipy.cluster.hierarchy.ward(X) + tree = to_tree(Z) + assert_(tree > tree.get_left()) + assert_(tree.get_right() > tree.get_left()) + assert_(tree.get_right() == tree.get_right()) + assert_(tree.get_right() != tree.get_left()) + + +@skip_if_array_api(np_only=True, reasons=['`cut_tree` uses non-standard indexing']) +def test_cut_tree(xp): + np.random.seed(23) + nobs = 50 + X = np.random.randn(nobs, 4) + X = xp.asarray(X) + Z = scipy.cluster.hierarchy.ward(X) + cutree = cut_tree(Z) + + # cutree.dtype varies between int32 and int64 over platforms + xp_assert_close(cutree[:, 0], xp.arange(nobs), rtol=1e-15, check_dtype=False) + xp_assert_close(cutree[:, -1], xp.zeros(nobs), rtol=1e-15, check_dtype=False) + assert_equal(np.asarray(cutree).max(0), np.arange(nobs - 1, -1, -1)) + + xp_assert_close(cutree[:, [-5]], cut_tree(Z, n_clusters=5), rtol=1e-15) + xp_assert_close(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10]), rtol=1e-15) + xp_assert_close(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5]), rtol=1e-15) + + nodes = _order_cluster_tree(Z) + heights = xp.asarray([node.dist for node in nodes]) + + xp_assert_close(cutree[:, np.searchsorted(heights, [5])], + 
cut_tree(Z, height=5), rtol=1e-15) + xp_assert_close(cutree[:, np.searchsorted(heights, [5, 10])], + cut_tree(Z, height=[5, 10]), rtol=1e-15) + xp_assert_close(cutree[:, np.searchsorted(heights, [10, 5])], + cut_tree(Z, height=[10, 5]), rtol=1e-15) + + +@skip_if_array_api(cpu_only=True) +def test_optimal_leaf_ordering(xp): + # test with the distance vector y + Z = optimal_leaf_ordering(linkage(xp.asarray(hierarchy_test_data.ytdist)), + xp.asarray(hierarchy_test_data.ytdist)) + expectedZ = hierarchy_test_data.linkage_ytdist_single_olo + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10) + + # test with the observation matrix X + Z = optimal_leaf_ordering(linkage(xp.asarray(hierarchy_test_data.X), 'ward'), + xp.asarray(hierarchy_test_data.X)) + expectedZ = hierarchy_test_data.linkage_X_ward_olo + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06) + + +@skip_if_array_api(np_only=True, reasons=['`Heap` only supports NumPy backend']) +def test_Heap(xp): + values = xp.asarray([2, -1, 0, -1.5, 3]) + heap = Heap(values) + + pair = heap.get_min() + assert_equal(pair['key'], 3) + assert_equal(pair['value'], -1.5) + + heap.remove_min() + pair = heap.get_min() + assert_equal(pair['key'], 1) + assert_equal(pair['value'], -1) + + heap.change_value(1, 2.5) + pair = heap.get_min() + assert_equal(pair['key'], 2) + assert_equal(pair['value'], 0) + + heap.remove_min() + heap.remove_min() + + heap.change_value(1, 10) + pair = heap.get_min() + assert_equal(pair['key'], 4) + assert_equal(pair['value'], 3) + + heap.remove_min() + pair = heap.get_min() + assert_equal(pair['key'], 1) + assert_equal(pair['value'], 10) diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/vq.py b/venv/lib/python3.10/site-packages/scipy/cluster/vq.py new file mode 100644 index 0000000000000000000000000000000000000000..f4b5179ce524f2dbb15939d004b5f7ff3cfa6847 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/cluster/vq.py @@ -0,0 +1,835 @@ +""" +K-means clustering and vector quantization (:mod:`scipy.cluster.vq`) +==================================================================== + +Provides routines for k-means clustering, generating code books +from k-means models and quantizing vectors by comparing them with +centroids in a code book. + +.. autosummary:: + :toctree: generated/ + + whiten -- Normalize a group of observations so each feature has unit variance + vq -- Calculate code book membership of a set of observation vectors + kmeans -- Perform k-means on a set of observation vectors forming k clusters + kmeans2 -- A different implementation of k-means with more methods + -- for initializing centroids + +Background information +---------------------- +The k-means algorithm takes as input the number of clusters to +generate, k, and a set of observation vectors to cluster. It +returns a set of centroids, one for each of the k clusters. An +observation vector is classified with the cluster number or +centroid index of the centroid closest to it. + +A vector v belongs to cluster i if it is closer to centroid i than +any other centroid. If v belongs to i, we say centroid i is the +dominating centroid of v. The k-means algorithm tries to +minimize distortion, which is defined as the sum of the squared distances +between each observation vector and its dominating centroid. +The minimization is achieved by iteratively reclassifying +the observations into clusters and recalculating the centroids until +a configuration is reached in which the centroids are stable. 
One can +also define a maximum number of iterations. + +Since vector quantization is a natural application for k-means, +information theory terminology is often used. The centroid index +or cluster index is also referred to as a "code" and the table +mapping codes to centroids and, vice versa, is often referred to as a +"code book". The result of k-means, a set of centroids, can be +used to quantize vectors. Quantization aims to find an encoding of +vectors that reduces the expected distortion. + +All routines expect obs to be an M by N array, where the rows are +the observation vectors. The codebook is a k by N array, where the +ith row is the centroid of code word i. The observation vectors +and centroids have the same feature dimension. + +As an example, suppose we wish to compress a 24-bit color image +(each pixel is represented by one byte for red, one for blue, and +one for green) before sending it over the web. By using a smaller +8-bit encoding, we can reduce the amount of data by two +thirds. Ideally, the colors for each of the 256 possible 8-bit +encoding values should be chosen to minimize distortion of the +color. Running k-means with k=256 generates a code book of 256 +codes, which fills up all possible 8-bit sequences. Instead of +sending a 3-byte value for each pixel, the 8-bit centroid index +(or code word) of the dominating centroid is transmitted. The code +book is also sent over the wire so each 8-bit code can be +translated back to a 24-bit pixel value representation. If the +image of interest was of an ocean, we would expect many 24-bit +blues to be represented by 8-bit codes. If it was an image of a +human face, more flesh-tone colors would be represented in the +code book. + +""" +import warnings +import numpy as np +from collections import deque +from scipy._lib._array_api import ( + _asarray, array_namespace, size, atleast_nd, copy, cov +) +from scipy._lib._util import check_random_state, rng_integers +from scipy.spatial.distance import cdist + +from . import _vq + +__docformat__ = 'restructuredtext' + +__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2'] + + +class ClusterError(Exception): + pass + + +def whiten(obs, check_finite=True): + """ + Normalize a group of observations on a per feature basis. + + Before running k-means, it is beneficial to rescale each feature + dimension of the observation set by its standard deviation (i.e. "whiten" + it - as in "white noise" where each frequency has equal power). + Each feature is divided by its standard deviation across all observations + to give it unit variance. + + Parameters + ---------- + obs : ndarray + Each row of the array is an observation. The + columns are the features seen during each observation. + + >>> # f0 f1 f2 + >>> obs = [[ 1., 1., 1.], #o0 + ... [ 2., 2., 2.], #o1 + ... [ 3., 3., 3.], #o2 + ... [ 4., 4., 4.]] #o3 + + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + + Returns + ------- + result : ndarray + Contains the values in `obs` scaled by the standard deviation + of each column. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster.vq import whiten + >>> features = np.array([[1.9, 2.3, 1.7], + ... [1.5, 2.5, 2.2], + ... 
[0.8, 0.6, 1.7,]])
+    >>> whiten(features)
+    array([[ 4.17944278,  2.69811351,  7.21248917],
+           [ 3.29956009,  2.93273208,  9.33380951],
+           [ 1.75976538,  0.7038557 ,  7.21248917]])
+
+    """
+    xp = array_namespace(obs)
+    obs = _asarray(obs, check_finite=check_finite, xp=xp)
+    std_dev = xp.std(obs, axis=0)
+    zero_std_mask = std_dev == 0
+    if xp.any(zero_std_mask):
+        std_dev[zero_std_mask] = 1.0
+        warnings.warn("Some columns have standard deviation zero. "
+                      "The values of these columns will not change.",
+                      RuntimeWarning, stacklevel=2)
+    return obs / std_dev
+
+
+def vq(obs, code_book, check_finite=True):
+    """
+    Assign codes from a code book to observations.
+
+    Assigns a code from a code book to each observation. Each
+    observation vector in the 'M' by 'N' `obs` array is compared with the
+    centroids in the code book and assigned the code of the closest
+    centroid.
+
+    The features in `obs` should have unit variance, which can be
+    achieved by passing them through the whiten function. The code
+    book can be created with the k-means algorithm or a different
+    encoding algorithm.
+
+    Parameters
+    ----------
+    obs : ndarray
+        Each row of the 'M' x 'N' array is an observation. The columns are
+        the "features" seen during each observation. The features must be
+        whitened first using the whiten function or something equivalent.
+    code_book : ndarray
+        The code book is usually generated using the k-means algorithm.
+        Each row of the array holds a different code, and the columns are
+        the features of the code.
+
+        >>> #              f0    f1    f2   f3
+        >>> code_book = [
+        ...             [  1.,   2.,   3.,   4.],  #c0
+        ...             [  1.,   2.,   3.,   4.],  #c1
+        ...             [  1.,   2.,   3.,   4.]]  #c2
+
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default: True
+
+    Returns
+    -------
+    code : ndarray
+        A length M array holding the code book index for each observation.
+    dist : ndarray
+        The distortion (distance) between the observation and its nearest
+        code.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster.vq import vq
+    >>> code_book = np.array([[1., 1., 1.],
+    ...                       [2., 2., 2.]])
+    >>> features = np.array([[1.9, 2.3, 1.7],
+    ...                      [1.5, 2.5, 2.2],
+    ...                      [0.8, 0.6, 1.7]])
+    >>> vq(features, code_book)
+    (array([1, 1, 0], dtype=int32), array([0.43588989, 0.73484692, 0.83066239]))
+
+    """
+    xp = array_namespace(obs, code_book)
+    obs = _asarray(obs, xp=xp, check_finite=check_finite)
+    code_book = _asarray(code_book, xp=xp, check_finite=check_finite)
+    ct = xp.result_type(obs, code_book)
+
+    c_obs = xp.astype(obs, ct, copy=False)
+    c_code_book = xp.astype(code_book, ct, copy=False)
+
+    if xp.isdtype(ct, kind='real floating'):
+        c_obs = np.asarray(c_obs)
+        c_code_book = np.asarray(c_code_book)
+        result = _vq.vq(c_obs, c_code_book)
+        return xp.asarray(result[0]), xp.asarray(result[1])
+    return py_vq(obs, code_book, check_finite=False)
+
+
+def py_vq(obs, code_book, check_finite=True):
+    """ Python version of vq algorithm.
+
+    The algorithm computes the Euclidean distance between each
+    observation and every frame in the code_book.
+
+    Parameters
+    ----------
+    obs : ndarray
+        Expects a rank 2 array. Each row is one observation.
+    code_book : ndarray
+        Code book to use. Same format as obs. Should have the same number
+        of features (e.g., columns) as obs.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default: True
+
+    Returns
+    -------
+    code : ndarray
+        code[i] gives the label of the ith observation; its code is
+        code_book[code[i]].
+    min_dist : ndarray
+        min_dist[i] gives the distance between the ith observation and its
+        corresponding code.
+
+    Notes
+    -----
+    This function is slower than the C version but works for
+    all input types. If the inputs have the wrong types for the
+    C versions of the function, this one is called as a last resort.
+
+    It is about 20 times slower than the C version.
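+
+    Examples
+    --------
+    A minimal doctest-style illustration (added for exposition; the exact
+    dtypes shown assume the default NumPy backend):
+
+    >>> import numpy as np
+    >>> from scipy.cluster.vq import py_vq
+    >>> obs = np.array([[0., 0.], [3., 3.]])
+    >>> code_book = np.array([[0., 1.], [2., 2.]])
+    >>> code, dist = py_vq(obs, code_book)
+    >>> code
+    array([0, 1])
+    >>> dist
+    array([1.        , 1.41421356])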
+
+    """
+    xp = array_namespace(obs, code_book)
+    obs = _asarray(obs, xp=xp, check_finite=check_finite)
+    code_book = _asarray(code_book, xp=xp, check_finite=check_finite)
+
+    if obs.ndim != code_book.ndim:
+        raise ValueError("Observation and code_book should have the same rank")
+
+    if obs.ndim == 1:
+        obs = obs[:, xp.newaxis]
+        code_book = code_book[:, xp.newaxis]
+
+    # Once `cdist` has array API support, this `xp.asarray` call can be removed
+    dist = xp.asarray(cdist(obs, code_book))
+    code = xp.argmin(dist, axis=1)
+    min_dist = xp.min(dist, axis=1)
+    return code, min_dist
+
+
+def _kmeans(obs, guess, thresh=1e-5, xp=None):
+    """ "raw" version of k-means.
+
+    Returns
+    -------
+    code_book
+        The lowest distortion codebook found.
+    avg_dist
+        The average distance an observation is from a code in the book.
+        Lower means the code_book matches the data better.
+
+    See Also
+    --------
+    kmeans : wrapper around k-means
+
+    Examples
+    --------
+    Note: not whitened in this example.
+
+    >>> import numpy as np
+    >>> from scipy.cluster.vq import _kmeans
+    >>> features  = np.array([[ 1.9,2.3],
+    ...                       [ 1.5,2.5],
+    ...                       [ 0.8,0.6],
+    ...                       [ 0.4,1.8],
+    ...                       [ 1.0,1.0]])
+    >>> book = np.array((features[0],features[2]))
+    >>> _kmeans(features,book)
+    (array([[ 1.7       ,  2.4       ],
+           [ 0.73333333,  1.13333333]]), 0.40563916697728591)
+
+    """
+    xp = np if xp is None else xp
+    code_book = guess
+    diff = xp.inf
+    prev_avg_dists = deque([diff], maxlen=2)
+    while diff > thresh:
+        # compute membership and distances between obs and code_book
+        obs_code, distort = vq(obs, code_book, check_finite=False)
+        prev_avg_dists.append(xp.mean(distort, axis=-1))
+        # recalc code_book as centroids of associated obs
+        obs = np.asarray(obs)
+        obs_code = np.asarray(obs_code)
+        code_book, has_members = _vq.update_cluster_means(obs, obs_code,
+                                                          code_book.shape[0])
+        obs = xp.asarray(obs)
+        obs_code = xp.asarray(obs_code)
+        code_book = xp.asarray(code_book)
+        has_members = xp.asarray(has_members)
+        code_book = code_book[has_members]
+        diff = xp.abs(prev_avg_dists[0] - prev_avg_dists[1])
+
+    return code_book, prev_avg_dists[1]
+
+
+def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True,
+           *, seed=None):
+    """
+    Performs k-means on a set of observation vectors forming k clusters.
+
+    The k-means algorithm adjusts the classification of the observations
+    into clusters and updates the cluster centroids until the position of
+    the centroids is stable over successive iterations. In this
+    implementation of the algorithm, the stability of the centroids is
+    determined by comparing the absolute value of the change in the average
+    Euclidean distance between the observations and their corresponding
+    centroids against a threshold. This yields
+    a code book mapping centroids to codes and vice versa.
+
+    Parameters
+    ----------
+    obs : ndarray
+        Each row of the M by N array is an observation vector.
The + columns are the features seen during each observation. + The features must be whitened first with the `whiten` function. + + k_or_guess : int or ndarray + The number of centroids to generate. A code is assigned to + each centroid, which is also the row index of the centroid + in the code_book matrix generated. + + The initial k centroids are chosen by randomly selecting + observations from the observation matrix. Alternatively, + passing a k by N array specifies the initial k centroids. + + iter : int, optional + The number of times to run k-means, returning the codebook + with the lowest distortion. This argument is ignored if + initial centroids are specified with an array for the + ``k_or_guess`` parameter. This parameter does not represent the + number of iterations of the k-means algorithm. + + thresh : float, optional + Terminates the k-means algorithm if the change in + distortion since the last k-means iteration is less than + or equal to threshold. + + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + Seed for initializing the pseudo-random number generator. + If `seed` is None (or `numpy.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + The default is None. + + Returns + ------- + codebook : ndarray + A k by N array of k centroids. The ith centroid + codebook[i] is represented with the code i. The centroids + and codes generated represent the lowest distortion seen, + not necessarily the globally minimal distortion. + Note that the number of centroids is not necessarily the same as the + ``k_or_guess`` parameter, because centroids assigned to no observations + are removed during iterations. + + distortion : float + The mean (non-squared) Euclidean distance between the observations + passed and the centroids generated. Note the difference to the standard + definition of distortion in the context of the k-means algorithm, which + is the sum of the squared distances. + + See Also + -------- + kmeans2 : a different implementation of k-means clustering + with more methods for generating initial centroids but without + using a distortion change threshold as a stopping criterion. + + whiten : must be called prior to passing an observation matrix + to kmeans. + + Notes + ----- + For more functionalities or optimal performance, you can use + `sklearn.cluster.KMeans `_. + `This `_ + is a benchmark result of several implementations. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster.vq import vq, kmeans, whiten + >>> import matplotlib.pyplot as plt + >>> features = np.array([[ 1.9,2.3], + ... [ 1.5,2.5], + ... [ 0.8,0.6], + ... [ 0.4,1.8], + ... [ 0.1,0.1], + ... [ 0.2,1.8], + ... [ 2.0,0.5], + ... [ 0.3,1.5], + ... 
[ 1.0,1.0]]) + >>> whitened = whiten(features) + >>> book = np.array((whitened[0],whitened[2])) + >>> kmeans(whitened,book) + (array([[ 2.3110306 , 2.86287398], # random + [ 0.93218041, 1.24398691]]), 0.85684700941625547) + + >>> codes = 3 + >>> kmeans(whitened,codes) + (array([[ 2.3110306 , 2.86287398], # random + [ 1.32544402, 0.65607529], + [ 0.40782893, 2.02786907]]), 0.5196582527686241) + + >>> # Create 50 datapoints in two clusters a and b + >>> pts = 50 + >>> rng = np.random.default_rng() + >>> a = rng.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts) + >>> b = rng.multivariate_normal([30, 10], + ... [[10, 2], [2, 1]], + ... size=pts) + >>> features = np.concatenate((a, b)) + >>> # Whiten data + >>> whitened = whiten(features) + >>> # Find 2 clusters in the data + >>> codebook, distortion = kmeans(whitened, 2) + >>> # Plot whitened data and cluster centers in red + >>> plt.scatter(whitened[:, 0], whitened[:, 1]) + >>> plt.scatter(codebook[:, 0], codebook[:, 1], c='r') + >>> plt.show() + + """ + if isinstance(k_or_guess, int): + xp = array_namespace(obs) + else: + xp = array_namespace(obs, k_or_guess) + obs = _asarray(obs, xp=xp, check_finite=check_finite) + guess = _asarray(k_or_guess, xp=xp, check_finite=check_finite) + if iter < 1: + raise ValueError("iter must be at least 1, got %s" % iter) + + # Determine whether a count (scalar) or an initial guess (array) was passed. + if size(guess) != 1: + if size(guess) < 1: + raise ValueError("Asked for 0 clusters. Initial book was %s" % + guess) + return _kmeans(obs, guess, thresh=thresh, xp=xp) + + # k_or_guess is a scalar, now verify that it's an integer + k = int(guess) + if k != guess: + raise ValueError("If k_or_guess is a scalar, it must be an integer.") + if k < 1: + raise ValueError("Asked for %d clusters." % k) + + rng = check_random_state(seed) + + # initialize best distance value to a large value + best_dist = xp.inf + for i in range(iter): + # the initial code book is randomly selected from observations + guess = _kpoints(obs, k, rng, xp) + book, dist = _kmeans(obs, guess, thresh=thresh, xp=xp) + if dist < best_dist: + best_book = book + best_dist = dist + return best_book, best_dist + + +def _kpoints(data, k, rng, xp): + """Pick k points at random in data (one row = one observation). + + Parameters + ---------- + data : ndarray + Expect a rank 1 or 2 array. Rank 1 are assumed to describe one + dimensional data, rank 2 multidimensional data, in which case one + row is one observation. + k : int + Number of samples to generate. + rng : `numpy.random.Generator` or `numpy.random.RandomState` + Random number generator. + + Returns + ------- + x : ndarray + A 'k' by 'N' containing the initial centroids + + """ + idx = rng.choice(data.shape[0], size=int(k), replace=False) + # convert to array with default integer dtype (avoids numpy#25607) + idx = xp.asarray(idx, dtype=xp.asarray([1]).dtype) + return xp.take(data, idx, axis=0) + + +def _krandinit(data, k, rng, xp): + """Returns k samples of a random variable whose parameters depend on data. + + More precisely, it returns k observations sampled from a Gaussian random + variable whose mean and covariances are the ones estimated from the data. + + Parameters + ---------- + data : ndarray + Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D + data, rank 2 multidimensional data, in which case one + row is one observation. + k : int + Number of samples to generate. + rng : `numpy.random.Generator` or `numpy.random.RandomState` + Random number generator. 
+
+    Returns
+    -------
+    x : ndarray
+        A 'k' by 'N' array containing the initial centroids
+
+    """
+    mu = xp.mean(data, axis=0)
+    k = np.asarray(k)
+
+    if data.ndim == 1:
+        _cov = cov(data)
+        x = rng.standard_normal(size=k)
+        x = xp.asarray(x)
+        x *= xp.sqrt(_cov)
+    elif data.shape[1] > data.shape[0]:
+        # initialize when the covariance matrix is rank deficient
+        _, s, vh = xp.linalg.svd(data - mu, full_matrices=False)
+        x = rng.standard_normal(size=(k, size(s)))
+        x = xp.asarray(x)
+        sVh = s[:, None] * vh / xp.sqrt(data.shape[0] - xp.asarray(1.))
+        x = x @ sVh
+    else:
+        _cov = atleast_nd(cov(data.T), ndim=2)
+
+        # k rows, d cols (one row = one obs)
+        # Generate k sample of a random variable ~ Gaussian(mu, cov)
+        x = rng.standard_normal(size=(k, size(mu)))
+        x = xp.asarray(x)
+        x = x @ xp.linalg.cholesky(_cov).T
+
+    x += mu
+    return x
+
+
+def _kpp(data, k, rng, xp):
+    """ Picks k points in the data based on the kmeans++ method.
+
+    Parameters
+    ----------
+    data : ndarray
+        Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
+        data, rank 2 multidimensional data, in which case one
+        row is one observation.
+    k : int
+        Number of samples to generate.
+    rng : `numpy.random.Generator` or `numpy.random.RandomState`
+        Random number generator.
+
+    Returns
+    -------
+    init : ndarray
+        A 'k' by 'N' array containing the initial centroids.
+
+    References
+    ----------
+    .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
+       careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
+       on Discrete Algorithms, 2007.
+    """
+
+    ndim = len(data.shape)
+    if ndim == 1:
+        data = data[:, None]
+
+    dims = data.shape[1]
+
+    init = xp.empty((int(k), dims))
+
+    for i in range(k):
+        if i == 0:
+            # the first centroid is an observation drawn uniformly at random
+            init[i, :] = data[rng_integers(rng, data.shape[0]), :]
+
+        else:
+            # D2[j] is the squared distance from observation j to the nearest
+            # centroid chosen so far; draw the next centroid with probability
+            # proportional to D2 (inverse-CDF sampling via searchsorted)
+            D2 = cdist(init[:i,:], data, metric='sqeuclidean').min(axis=0)
+            probs = D2/D2.sum()
+            cumprobs = probs.cumsum()
+            r = rng.uniform()
+            cumprobs = np.asarray(cumprobs)
+            init[i, :] = data[np.searchsorted(cumprobs, r), :]
+
+    if ndim == 1:
+        init = init[:, 0]
+    return init
+
+
+_valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp}
+
+
+def _missing_warn():
+    """Print a warning when called."""
+    warnings.warn("One of the clusters is empty. "
+                  "Re-run kmeans with a different initialization.",
+                  stacklevel=3)
+
+
+def _missing_raise():
+    """Raise a ClusterError when called."""
+    raise ClusterError("One of the clusters is empty. "
+                       "Re-run kmeans with a different initialization.")
+
+
+_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
+
+
+def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
+            missing='warn', check_finite=True, *, seed=None):
+    """
+    Classify a set of observations into k clusters using the k-means algorithm.
+
+    The algorithm attempts to minimize the Euclidean distance between
+    observations and centroids. Several initialization methods are
+    included.
+
+    Parameters
+    ----------
+    data : ndarray
+        A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
+        'M' array of 'M' 1-D observations.
+    k : int or ndarray
+        The number of clusters to form as well as the number of
+        centroids to generate. If the `minit` initialization string is
+        'matrix', or if an ndarray is given instead, it is
+        interpreted as the initial clusters to use.
+    iter : int, optional
+        Number of iterations of the k-means algorithm to run. Note
+        that this differs in meaning from the `iter` parameter to
+        the kmeans function.
+    thresh : float, optional
+        (not used yet)
+    minit : str, optional
+        Method for initialization. Available methods are 'random',
+        'points', '++' and 'matrix':
+
+        'random': generate k centroids from a Gaussian with mean and
+        variance estimated from the data.
+
+        'points': choose k observations (rows) at random from data for
+        the initial centroids.
+
+        '++': choose k observations according to the kmeans++ method
+        (careful seeding)
+
+        'matrix': interpret the k parameter as a k by M (or length k
+        array for 1-D data) array of initial centroids.
+    missing : str, optional
+        Method to deal with empty clusters. Available methods are
+        'warn' and 'raise':
+
+        'warn': give a warning and continue.
+
+        'raise': raise a ClusterError and terminate the algorithm.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default: True
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        Seed for initializing the pseudo-random number generator.
+        If `seed` is None (or `numpy.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+        The default is None.
+
+    Returns
+    -------
+    centroid : ndarray
+        A 'k' by 'N' array of centroids found at the last iteration of
+        k-means.
+    label : ndarray
+        label[i] is the code or index of the centroid the
+        ith observation is closest to.
+
+    See Also
+    --------
+    kmeans
+
+    References
+    ----------
+    .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
+       careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
+       on Discrete Algorithms, 2007.
+
+    Examples
+    --------
+    >>> from scipy.cluster.vq import kmeans2
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+
+    Create z, an array with shape (100, 2) containing a mixture of samples
+    from three multivariate normal distributions.
+
+    >>> rng = np.random.default_rng()
+    >>> a = rng.multivariate_normal([0, 6], [[2, 1], [1, 1.5]], size=45)
+    >>> b = rng.multivariate_normal([2, 0], [[1, -1], [-1, 3]], size=30)
+    >>> c = rng.multivariate_normal([6, 4], [[5, 0], [0, 1.2]], size=25)
+    >>> z = np.concatenate((a, b, c))
+    >>> rng.shuffle(z)
+
+    Compute three clusters.
+
+    >>> centroid, label = kmeans2(z, 3, minit='points')
+    >>> centroid
+    array([[ 2.22274463, -0.61666946],  # may vary
+           [ 0.54069047,  5.86541444],
+           [ 6.73846769,  4.01991898]])
+
+    How many points are in each cluster?
+
+    >>> counts = np.bincount(label)
+    >>> counts
+    array([29, 51, 20])  # may vary
+
+    Plot the clusters.
+
+    >>> w0 = z[label == 0]
+    >>> w1 = z[label == 1]
+    >>> w2 = z[label == 2]
+    >>> plt.plot(w0[:, 0], w0[:, 1], 'o', alpha=0.5, label='cluster 0')
+    >>> plt.plot(w1[:, 0], w1[:, 1], 'd', alpha=0.5, label='cluster 1')
+    >>> plt.plot(w2[:, 0], w2[:, 1], 's', alpha=0.5, label='cluster 2')
+    >>> plt.plot(centroid[:, 0], centroid[:, 1], 'k*', label='centroids')
+    >>> plt.axis('equal')
+    >>> plt.legend(shadow=True)
+    >>> plt.show()
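+
+    Explicit initial centroids can also be supplied with ``minit='matrix'``.
+    A minimal additional sketch (the initial guesses below are arbitrary,
+    placed near the distribution means used above):
+
+    >>> init = np.array([[0., 6.], [2., 0.], [6., 4.]])
+    >>> centroid, label = kmeans2(z, init, minit='matrix')
+    >>> centroid.shape
+    (3, 2)
+    >>> label.shape
+    (100,)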
+
+    """
+    if int(iter) < 1:
+        raise ValueError("Invalid iter (%s), "
+                         "must be a positive integer."
% iter) + try: + miss_meth = _valid_miss_meth[missing] + except KeyError as e: + raise ValueError(f"Unknown missing method {missing!r}") from e + + if isinstance(k, int): + xp = array_namespace(data) + else: + xp = array_namespace(data, k) + data = _asarray(data, xp=xp, check_finite=check_finite) + code_book = copy(k, xp=xp) + if data.ndim == 1: + d = 1 + elif data.ndim == 2: + d = data.shape[1] + else: + raise ValueError("Input of rank > 2 is not supported.") + + if size(data) < 1 or size(code_book) < 1: + raise ValueError("Empty input is not supported.") + + # If k is not a single value, it should be compatible with data's shape + if minit == 'matrix' or size(code_book) > 1: + if data.ndim != code_book.ndim: + raise ValueError("k array doesn't match data rank") + nc = code_book.shape[0] + if data.ndim > 1 and code_book.shape[1] != d: + raise ValueError("k array doesn't match data dimension") + else: + nc = int(code_book) + + if nc < 1: + raise ValueError("Cannot ask kmeans2 for %d clusters" + " (k was %s)" % (nc, code_book)) + elif nc != code_book: + warnings.warn("k was not an integer, was converted.", stacklevel=2) + + try: + init_meth = _valid_init_meth[minit] + except KeyError as e: + raise ValueError(f"Unknown init method {minit!r}") from e + else: + rng = check_random_state(seed) + code_book = init_meth(data, code_book, rng, xp) + + data = np.asarray(data) + code_book = np.asarray(code_book) + for i in range(iter): + # Compute the nearest neighbor for each obs using the current code book + label = vq(data, code_book, check_finite=check_finite)[0] + # Update the code book by computing centroids + new_code_book, has_members = _vq.update_cluster_means(data, label, nc) + if not has_members.all(): + miss_meth() + # Set the empty clusters to their previous positions + new_code_book[~has_members] = code_book[~has_members] + code_book = new_code_book + + return xp.asarray(code_book), xp.asarray(label) diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..012019b833a14525ce73b51b528ca55ef9a4d262 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b36e39ad2bd7fea6d8c56d3b3a9946b0b3251db Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_bsr.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_bsr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5af97d8ca74b32e683465eaa42768c3d22344c44 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_bsr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d41dea786cc1d46395fb8b417bc9b344b15f5c8 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_construct.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_construct.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4deedbffdb024fefb3f1c8281f32a82938282da9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_construct.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_coo.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_coo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2dd7f812f618cdd4e674fbf5005b401201ec139 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_coo.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csc.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3045cc5036f58927d0aa2c273472bef664620615 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2edf704c7e885ec01ffe168555a8dc3952eb3a42 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66c8ea959ba1417060ead5cc5257a4339b3d5dcd Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3980d46595d03a33f4c045fa93d1133bbf1b390 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dok.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dok.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9b813d2726533f0037a3ab80be3c14fae7035b4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dok.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0745c589c6336c43bec73cd91699819cf95b22aa Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..49a3c9b2a21222cade4c8a3fae5f0794a671160d Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f2001c9bda9578caef14dde2789e5e29e03d74e Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f3fc3552431c5088937ebc1844acd5518005e66 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5d95343e1bb14f9e67344cb87b7ae647a0b1312 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8de444555a3fde3763f67526651a741a19c6ff1f Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_sputils.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_sputils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d30a321200fbcc4f768a8e6fca7f7c37c3db59a Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_sputils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b135b2273bfa6de11dd6a0309b2e5bbdfe65978c Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/bsr.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/bsr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c78e90234e29639ad7794a175868de12e282e63 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/bsr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/compressed.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/compressed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88bb304e3f80d24855cae4eff7d2a51c823a4d7b Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/compressed.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/construct.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/construct.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04c9e32684b6e8bda77a38b71f0281aa0be373dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/construct.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/coo.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/coo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86f4458e5d62539980a189382e47878f3e2551bd Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/coo.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f939d5303fc574234c2da4071905c2052081599 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/csr.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/csr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37886b23d1760c0cff1ba103f82731fb3fde865f Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/csr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/data.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10d3501f3cc61474c4a80bad241262a5922e7414 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/data.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/dia.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/dia.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..addebbe6844740fb52c014f946201ed4d840bdde Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/dia.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/dok.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/dok.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8048a505d7501284cae34bd840f0e21743ad014c Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/dok.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..922af0478b9e1443571e4530a5ebec3940ed8152 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/lil.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/lil.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ef2fbc52864172c7210d81a15824de22de63504 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/lil.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65cec7f9f76ef477f23982b6a12aba77936929b7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2f8a1f4d1dab00c9a563d40c2deda0cfdfc8691 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/sputils.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/sputils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cbd453a79ea35933c76c4113275b268ee51ac53 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/sputils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__init__.py b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ca1a0f435498acbeb4dc2fbdadf0baac86efcaa1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__init__.py @@ -0,0 +1,71 @@ +""" +Linear Solvers +============== + +The default solver is SuperLU (included in the scipy distribution), +which can solve real or complex linear systems in both single and +double precisions. It is automatically replaced by UMFPACK, if +available. Note that UMFPACK works in double precision only, so +switch it off by:: + + >>> from scipy.sparse.linalg import spsolve, use_solver + >>> use_solver(useUmfpack=False) + +to solve in the single precision. See also use_solver documentation. + +Example session:: + + >>> from scipy.sparse import csc_matrix, spdiags + >>> from numpy import array + >>> + >>> print("Inverting a sparse linear system:") + >>> print("The sparse matrix (constructed from diagonals):") + >>> a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5) + >>> b = array([1, 2, 3, 4, 5]) + >>> print("Solve: single precision complex:") + >>> use_solver( useUmfpack = False ) + >>> a = a.astype('F') + >>> x = spsolve(a, b) + >>> print(x) + >>> print("Error: ", a@x-b) + >>> + >>> print("Solve: double precision complex:") + >>> use_solver( useUmfpack = True ) + >>> a = a.astype('D') + >>> x = spsolve(a, b) + >>> print(x) + >>> print("Error: ", a@x-b) + >>> + >>> print("Solve: double precision:") + >>> a = a.astype('d') + >>> x = spsolve(a, b) + >>> print(x) + >>> print("Error: ", a@x-b) + >>> + >>> print("Solve: single precision:") + >>> use_solver( useUmfpack = False ) + >>> a = a.astype('f') + >>> x = spsolve(a, b.astype('f')) + >>> print(x) + >>> print("Error: ", a@x-b) + +""" + +#import umfpack +#__doc__ = '\n\n'.join( (__doc__, umfpack.__doc__) ) +#del umfpack + +from .linsolve import * +from ._superlu import SuperLU +from . import _add_newdocs +from . 
import linsolve + +__all__ = [ + 'MatrixRankWarning', 'SuperLU', 'factorized', + 'spilu', 'splu', 'spsolve', + 'spsolve_triangular', 'use_solver' +] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abee149157fae69c03c8cf0c65b6fb1acbe75412 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de717aebad4ca0172ffb7c4696f87516fd2246b2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/linsolve.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/linsolve.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c17b7ab3af5b26b0af80634cd2a9c6d834b4b203 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/linsolve.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_add_newdocs.py b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_add_newdocs.py new file mode 100644 index 0000000000000000000000000000000000000000..e7f7b1d16fd33b4beb5620e0a931a4c800d11cd4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_add_newdocs.py @@ -0,0 +1,153 @@ +from numpy.lib import add_newdoc + +add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', + """ + LU factorization of a sparse matrix. + + Factorization is represented as:: + + Pr @ A @ Pc = L @ U + + To construct these `SuperLU` objects, call the `splu` and `spilu` + functions. + + Attributes + ---------- + shape + nnz + perm_c + perm_r + L + U + + Methods + ------- + solve + + Notes + ----- + + .. versionadded:: 0.14.0 + + Examples + -------- + The LU decomposition can be used to solve matrix equations. Consider: + + >>> import numpy as np + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import splu + >>> A = csc_matrix([[1,2,0,4], [1,0,0,1], [1,0,2,1], [2,2,1,0.]]) + + This can be solved for a given right-hand side: + + >>> lu = splu(A) + >>> b = np.array([1, 2, 3, 4]) + >>> x = lu.solve(b) + >>> A.dot(x) + array([ 1., 2., 3., 4.]) + + The ``lu`` object also contains an explicit representation of the + decomposition. The permutations are represented as mappings of + indices: + + >>> lu.perm_r + array([2, 1, 3, 0], dtype=int32) # may vary + >>> lu.perm_c + array([0, 1, 3, 2], dtype=int32) # may vary + + The L and U factors are sparse matrices in CSC format: + + >>> lu.L.toarray() + array([[ 1. , 0. , 0. , 0. ], # may vary + [ 0.5, 1. , 0. , 0. ], + [ 0.5, -1. , 1. , 0. ], + [ 0.5, 1. , 0. , 1. ]]) + >>> lu.U.toarray() + array([[ 2. , 2. , 0. , 1. ], # may vary + [ 0. , -1. , 1. , -0.5], + [ 0. , 0. , 5. , -1. ], + [ 0. , 0. , 0. , 2. 
]]) + + The permutation matrices can be constructed: + + >>> Pr = csc_matrix((np.ones(4), (lu.perm_r, np.arange(4)))) + >>> Pc = csc_matrix((np.ones(4), (np.arange(4), lu.perm_c))) + + We can reassemble the original matrix: + + >>> (Pr.T @ (lu.L @ lu.U) @ Pc.T).toarray() + array([[ 1., 2., 0., 4.], + [ 1., 0., 0., 1.], + [ 1., 0., 2., 1.], + [ 2., 2., 1., 0.]]) + """) + +add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('solve', + """ + solve(rhs[, trans]) + + Solves linear system of equations with one or several right-hand sides. + + Parameters + ---------- + rhs : ndarray, shape (n,) or (n, k) + Right hand side(s) of equation + trans : {'N', 'T', 'H'}, optional + Type of system to solve:: + + 'N': A @ x == rhs (default) + 'T': A^T @ x == rhs + 'H': A^H @ x == rhs + + i.e., normal, transposed, and hermitian conjugate. + + Returns + ------- + x : ndarray, shape ``rhs.shape`` + Solution vector(s) + """)) + +add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('L', + """ + Lower triangular factor with unit diagonal as a + `scipy.sparse.csc_matrix`. + + .. versionadded:: 0.14.0 + """)) + +add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('U', + """ + Upper triangular factor as a `scipy.sparse.csc_matrix`. + + .. versionadded:: 0.14.0 + """)) + +add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('shape', + """ + Shape of the original matrix as a tuple of ints. + """)) + +add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('nnz', + """ + Number of nonzero elements in the matrix. + """)) + +add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_c', + """ + Permutation Pc represented as an array of indices. + + The column permutation matrix can be reconstructed via: + + >>> Pc = np.zeros((n, n)) + >>> Pc[np.arange(n), perm_c] = 1 + """)) + +add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_r', + """ + Permutation Pr represented as an array of indices. + + The row permutation matrix can be reconstructed via: + + >>> Pr = np.zeros((n, n)) + >>> Pr[perm_r, np.arange(n)] = 1 + """)) diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..96a77687bf5e9eb05d5e2d58b19a9cce3acd4234 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/linsolve.py b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/linsolve.py new file mode 100644 index 0000000000000000000000000000000000000000..e37721c76c133ee3ddbd44a6df03dfd99ee41e06 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/linsolve.py @@ -0,0 +1,746 @@ +from warnings import warn + +import numpy as np +from numpy import asarray +from scipy.sparse import (issparse, + SparseEfficiencyWarning, csc_matrix, csr_matrix) +from scipy.sparse._sputils import is_pydata_spmatrix, convert_pydata_sparse_to_scipy +from scipy.linalg import LinAlgError +import copy + +from . 
import _superlu
+
+noScikit = False
+try:
+    import scikits.umfpack as umfpack
+except ImportError:
+    noScikit = True
+
+useUmfpack = not noScikit
+
+__all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized',
+           'MatrixRankWarning', 'spsolve_triangular']
+
+
+class MatrixRankWarning(UserWarning):
+    pass
+
+
+def use_solver(**kwargs):
+    """
+    Select default sparse direct solver to be used.
+
+    Parameters
+    ----------
+    useUmfpack : bool, optional
+        Use UMFPACK [1]_, [2]_, [3]_, [4]_ over SuperLU. Has effect only
+        if ``scikits.umfpack`` is installed. Default: True
+    assumeSortedIndices : bool, optional
+        Allow UMFPACK to skip the step of sorting indices for a CSR/CSC matrix.
+        Has effect only if useUmfpack is True and ``scikits.umfpack`` is
+        installed. Default: False
+
+    Notes
+    -----
+    The default sparse solver is UMFPACK when available
+    (``scikits.umfpack`` is installed). This can be changed by passing
+    useUmfpack = False, which then causes the always-present SuperLU-based
+    solver to be used.
+
+    UMFPACK requires a CSR/CSC matrix to have sorted column/row indices. If
+    you are sure that the matrix fulfills this, pass ``assumeSortedIndices=True``
+    to gain some speed.
+
+    References
+    ----------
+    .. [1] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern
+           multifrontal method with a column pre-ordering strategy, ACM
+           Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
+           https://dl.acm.org/doi/abs/10.1145/992200.992206
+
+    .. [2] T. A. Davis, A column pre-ordering strategy for the
+           unsymmetric-pattern multifrontal method, ACM Trans.
+           on Mathematical Software, 30(2), 2004, pp. 165--195.
+           https://dl.acm.org/doi/abs/10.1145/992200.992205
+
+    .. [3] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
+           method for unsymmetric sparse matrices, ACM Trans. on
+           Mathematical Software, 25(1), 1999, pp. 1--19.
+           https://doi.org/10.1145/305658.287640
+
+    .. [4] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
+           method for sparse LU factorization, SIAM J. Matrix Analysis and
+           Applications, 18(1), 1997, pp. 140--158.
+           https://doi.org/10.1137/S0895479894246905
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse.linalg import use_solver, spsolve
+    >>> from scipy.sparse import csc_matrix
+    >>> R = np.random.randn(5, 5)
+    >>> A = csc_matrix(R)
+    >>> b = np.random.randn(5)
+    >>> use_solver(useUmfpack=False)  # enforce SuperLU over UMFPACK
+    >>> x = spsolve(A, b)
+    >>> np.allclose(A.dot(x), b)
+    True
+    >>> use_solver(useUmfpack=True)  # reset UMFPACK usage to default
+    """
+    if 'useUmfpack' in kwargs:
+        globals()['useUmfpack'] = kwargs['useUmfpack']
+    if useUmfpack and 'assumeSortedIndices' in kwargs:
+        umfpack.configure(assumeSortedIndices=kwargs['assumeSortedIndices'])
+
+def _get_umf_family(A):
+    """Get umfpack family string given the sparse matrix dtype."""
+    _families = {
+        (np.float64, np.int32): 'di',
+        (np.complex128, np.int32): 'zi',
+        (np.float64, np.int64): 'dl',
+        (np.complex128, np.int64): 'zl'
+    }
+
+    # A.dtype.name can only be "float64" or
+    # "complex128" in control flow
+    f_type = getattr(np, A.dtype.name)
+    # control flow may allow for more index
+    # types to get through here
+    i_type = getattr(np, A.indices.dtype.name)
+
+    try:
+        family = _families[(f_type, i_type)]
+
+    except KeyError as e:
+        msg = ('only float64 or complex128 matrices with int32 or int64 '
+               f'indices are supported! (got: matrix: {f_type}, indices: {i_type})')
+        raise ValueError(msg) from e
+
+    # See gh-8278.
Considered converting only if + # A.shape[0]*A.shape[1] > np.iinfo(np.int32).max, + # but that didn't always fix the issue. + family = family[0] + "l" + A_new = copy.copy(A) + A_new.indptr = np.asarray(A.indptr, dtype=np.int64) + A_new.indices = np.asarray(A.indices, dtype=np.int64) + + return family, A_new + +def _safe_downcast_indices(A): + # check for safe downcasting + max_value = np.iinfo(np.intc).max + + if A.indptr[-1] > max_value: # indptr[-1] is max b/c indptr always sorted + raise ValueError("indptr values too large for SuperLU") + + if max(*A.shape) > max_value: # only check large enough arrays + if np.any(A.indices > max_value): + raise ValueError("indices values too large for SuperLU") + + indices = A.indices.astype(np.intc, copy=False) + indptr = A.indptr.astype(np.intc, copy=False) + return indices, indptr + +def spsolve(A, b, permc_spec=None, use_umfpack=True): + """Solve the sparse linear system Ax=b, where b may be a vector or a matrix. + + Parameters + ---------- + A : ndarray or sparse matrix + The square matrix A will be converted into CSC or CSR form + b : ndarray or sparse matrix + The matrix or vector representing the right hand side of the equation. + If a vector, b.shape must be (n,) or (n, 1). + permc_spec : str, optional + How to permute the columns of the matrix for sparsity preservation. + (default: 'COLAMD') + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering [1]_, [2]_. + + use_umfpack : bool, optional + if True (default) then use UMFPACK for the solution [3]_, [4]_, [5]_, + [6]_ . This is only referenced if b is a vector and + ``scikits.umfpack`` is installed. + + Returns + ------- + x : ndarray or sparse matrix + the solution of the sparse linear equation. + If b is a vector, then x is a vector of size A.shape[1] + If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1]) + + Notes + ----- + For solving the matrix expression AX = B, this solver assumes the resulting + matrix X is sparse, as is often the case for very sparse inputs. If the + resulting X is dense, the construction of this sparse result will be + relatively expensive. In that case, consider converting A to a dense + matrix and using scipy.linalg.solve or its variants. + + References + ---------- + .. [1] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, Algorithm 836: + COLAMD, an approximate column minimum degree ordering algorithm, + ACM Trans. on Mathematical Software, 30(3), 2004, pp. 377--380. + :doi:`10.1145/1024074.1024080` + + .. [2] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, A column approximate + minimum degree ordering algorithm, ACM Trans. on Mathematical + Software, 30(3), 2004, pp. 353--376. :doi:`10.1145/1024074.1024079` + + .. [3] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern + multifrontal method with a column pre-ordering strategy, ACM + Trans. on Mathematical Software, 30(2), 2004, pp. 196--199. + https://dl.acm.org/doi/abs/10.1145/992200.992206 + + .. [4] T. A. Davis, A column pre-ordering strategy for the + unsymmetric-pattern multifrontal method, ACM Trans. + on Mathematical Software, 30(2), 2004, pp. 165--195. + https://dl.acm.org/doi/abs/10.1145/992200.992205 + + .. [5] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal + method for unsymmetric sparse matrices, ACM Trans. on + Mathematical Software, 25(1), 1999, pp. 1--19. 
+ https://doi.org/10.1145/305658.287640 + + .. [6] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal + method for sparse LU factorization, SIAM J. Matrix Analysis and + Computations, 18(1), 1997, pp. 140--158. + https://doi.org/10.1137/S0895479894246905T. + + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import spsolve + >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float) + >>> B = csc_matrix([[2, 0], [-1, 0], [2, 0]], dtype=float) + >>> x = spsolve(A, B) + >>> np.allclose(A.dot(x).toarray(), B.toarray()) + True + """ + is_pydata_sparse = is_pydata_spmatrix(b) + pydata_sparse_cls = b.__class__ if is_pydata_sparse else None + A = convert_pydata_sparse_to_scipy(A) + b = convert_pydata_sparse_to_scipy(b) + + if not (issparse(A) and A.format in ("csc", "csr")): + A = csc_matrix(A) + warn('spsolve requires A be CSC or CSR matrix format', + SparseEfficiencyWarning, stacklevel=2) + + # b is a vector only if b have shape (n,) or (n, 1) + b_is_sparse = issparse(b) + if not b_is_sparse: + b = asarray(b) + b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1)) + + # sum duplicates for non-canonical format + A.sum_duplicates() + A = A._asfptype() # upcast to a floating point format + result_dtype = np.promote_types(A.dtype, b.dtype) + if A.dtype != result_dtype: + A = A.astype(result_dtype) + if b.dtype != result_dtype: + b = b.astype(result_dtype) + + # validate input shapes + M, N = A.shape + if (M != N): + raise ValueError(f"matrix must be square (has shape {(M, N)})") + + if M != b.shape[0]: + raise ValueError(f"matrix - rhs dimension mismatch ({A.shape} - {b.shape[0]})") + + use_umfpack = use_umfpack and useUmfpack + + if b_is_vector and use_umfpack: + if b_is_sparse: + b_vec = b.toarray() + else: + b_vec = b + b_vec = asarray(b_vec, dtype=A.dtype).ravel() + + if noScikit: + raise RuntimeError('Scikits.umfpack not installed.') + + if A.dtype.char not in 'dD': + raise ValueError("convert matrix data to double, please, using" + " .astype(), or set linsolve.useUmfpack = False") + + umf_family, A = _get_umf_family(A) + umf = umfpack.UmfpackContext(umf_family) + x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec, + autoTranspose=True) + else: + if b_is_vector and b_is_sparse: + b = b.toarray() + b_is_sparse = False + + if not b_is_sparse: + if A.format == "csc": + flag = 1 # CSC format + else: + flag = 0 # CSR format + + indices = A.indices.astype(np.intc, copy=False) + indptr = A.indptr.astype(np.intc, copy=False) + options = dict(ColPerm=permc_spec) + x, info = _superlu.gssv(N, A.nnz, A.data, indices, indptr, + b, flag, options=options) + if info != 0: + warn("Matrix is exactly singular", MatrixRankWarning, stacklevel=2) + x.fill(np.nan) + if b_is_vector: + x = x.ravel() + else: + # b is sparse + Afactsolve = factorized(A) + + if not (b.format == "csc" or is_pydata_spmatrix(b)): + warn('spsolve is more efficient when sparse b ' + 'is in the CSC matrix format', + SparseEfficiencyWarning, stacklevel=2) + b = csc_matrix(b) + + # Create a sparse output matrix by repeatedly applying + # the sparse factorization to solve columns of b. + data_segs = [] + row_segs = [] + col_segs = [] + for j in range(b.shape[1]): + # TODO: replace this with + # bj = b[:, j].toarray().ravel() + # once 1D sparse arrays are supported. + # That is a slightly faster code path. 
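+            # A note on the loop below: the LU factorization of A is built
+            # once (via `factorized`) and reused for every column of b, and
+            # only the nonzeros of each solved column are kept, so the
+            # assembled result stays sparse. A doctest-style sketch of the
+            # same idea, with illustrative names:
+            #   >>> solve = factorized(A)                    # factor A once
+            #   >>> xj = solve(b[:, [0]].toarray().ravel())  # one dense column
+            #   >>> support = np.flatnonzero(xj)             # sparse pattern of xj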
+ bj = b[:, [j]].toarray().ravel() + xj = Afactsolve(bj) + w = np.flatnonzero(xj) + segment_length = w.shape[0] + row_segs.append(w) + col_segs.append(np.full(segment_length, j, dtype=int)) + data_segs.append(np.asarray(xj[w], dtype=A.dtype)) + sparse_data = np.concatenate(data_segs) + sparse_row = np.concatenate(row_segs) + sparse_col = np.concatenate(col_segs) + x = A.__class__((sparse_data, (sparse_row, sparse_col)), + shape=b.shape, dtype=A.dtype) + + if is_pydata_sparse: + x = pydata_sparse_cls.from_scipy_sparse(x) + + return x + + +def splu(A, permc_spec=None, diag_pivot_thresh=None, + relax=None, panel_size=None, options=dict()): + """ + Compute the LU decomposition of a sparse, square matrix. + + Parameters + ---------- + A : sparse matrix + Sparse matrix to factorize. Most efficient when provided in CSC + format. Other formats will be converted to CSC before factorization. + permc_spec : str, optional + How to permute the columns of the matrix for sparsity preservation. + (default: 'COLAMD') + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering + + diag_pivot_thresh : float, optional + Threshold used for a diagonal entry to be an acceptable pivot. + See SuperLU user's guide for details [1]_ + relax : int, optional + Expert option for customizing the degree of relaxing supernodes. + See SuperLU user's guide for details [1]_ + panel_size : int, optional + Expert option for customizing the panel size. + See SuperLU user's guide for details [1]_ + options : dict, optional + Dictionary containing additional expert options to SuperLU. + See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument) + for more details. For example, you can specify + ``options=dict(Equil=False, IterRefine='SINGLE'))`` + to turn equilibration off and perform a single iterative refinement. + + Returns + ------- + invA : scipy.sparse.linalg.SuperLU + Object, which has a ``solve`` method. + + See also + -------- + spilu : incomplete LU decomposition + + Notes + ----- + This function uses the SuperLU library. + + References + ---------- + .. [1] SuperLU https://portal.nersc.gov/project/sparse/superlu/ + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import splu + >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float) + >>> B = splu(A) + >>> x = np.array([1., 2., 3.], dtype=float) + >>> B.solve(x) + array([ 1. , -3. , -1.5]) + >>> A.dot(B.solve(x)) + array([ 1., 2., 3.]) + >>> B.solve(A.dot(x)) + array([ 1., 2., 3.]) + """ + + if is_pydata_spmatrix(A): + def csc_construct_func(*a, cls=type(A)): + return cls.from_scipy_sparse(csc_matrix(*a)) + A = A.to_scipy_sparse().tocsc() + else: + csc_construct_func = csc_matrix + + if not (issparse(A) and A.format == "csc"): + A = csc_matrix(A) + warn('splu converted its input to CSC format', + SparseEfficiencyWarning, stacklevel=2) + + # sum duplicates for non-canonical format + A.sum_duplicates() + A = A._asfptype() # upcast to a floating point format + + M, N = A.shape + if (M != N): + raise ValueError("can only factor square matrices") # is this true? 
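+    # SuperLU factors square matrices only, so rectangular systems are
+    # rejected here; for least-squares problems an iterative solver is the
+    # usual alternative. A hedged doctest-style sketch (A_rect and b are
+    # hypothetical names, not defined in this module):
+    #   >>> from scipy.sparse.linalg import lsqr
+    #   >>> x = lsqr(A_rect, b)[0]  # minimizes the 2-norm of (A_rect @ x - b)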
+ + indices, indptr = _safe_downcast_indices(A) + + _options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec, + PanelSize=panel_size, Relax=relax) + if options is not None: + _options.update(options) + + # Ensure that no column permutations are applied + if (_options["ColPerm"] == "NATURAL"): + _options["SymmetricMode"] = True + + return _superlu.gstrf(N, A.nnz, A.data, indices, indptr, + csc_construct_func=csc_construct_func, + ilu=False, options=_options) + + +def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None, + diag_pivot_thresh=None, relax=None, panel_size=None, options=None): + """ + Compute an incomplete LU decomposition for a sparse, square matrix. + + The resulting object is an approximation to the inverse of `A`. + + Parameters + ---------- + A : (N, N) array_like + Sparse matrix to factorize. Most efficient when provided in CSC format. + Other formats will be converted to CSC before factorization. + drop_tol : float, optional + Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition. + (default: 1e-4) + fill_factor : float, optional + Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10) + drop_rule : str, optional + Comma-separated string of drop rules to use. + Available rules: ``basic``, ``prows``, ``column``, ``area``, + ``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``) + + See SuperLU documentation for details. + + Remaining other options + Same as for `splu` + + Returns + ------- + invA_approx : scipy.sparse.linalg.SuperLU + Object, which has a ``solve`` method. + + See also + -------- + splu : complete LU decomposition + + Notes + ----- + To improve the better approximation to the inverse, you may need to + increase `fill_factor` AND decrease `drop_tol`. + + This function uses the SuperLU library. + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import spilu + >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float) + >>> B = spilu(A) + >>> x = np.array([1., 2., 3.], dtype=float) + >>> B.solve(x) + array([ 1. , -3. , -1.5]) + >>> A.dot(B.solve(x)) + array([ 1., 2., 3.]) + >>> B.solve(A.dot(x)) + array([ 1., 2., 3.]) + """ + + if is_pydata_spmatrix(A): + def csc_construct_func(*a, cls=type(A)): + return cls.from_scipy_sparse(csc_matrix(*a)) + A = A.to_scipy_sparse().tocsc() + else: + csc_construct_func = csc_matrix + + if not (issparse(A) and A.format == "csc"): + A = csc_matrix(A) + warn('spilu converted its input to CSC format', + SparseEfficiencyWarning, stacklevel=2) + + # sum duplicates for non-canonical format + A.sum_duplicates() + A = A._asfptype() # upcast to a floating point format + + M, N = A.shape + if (M != N): + raise ValueError("can only factor square matrices") # is this true? + + indices, indptr = _safe_downcast_indices(A) + + _options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol, + ILU_FillFactor=fill_factor, + DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec, + PanelSize=panel_size, Relax=relax) + if options is not None: + _options.update(options) + + # Ensure that no column permutations are applied + if (_options["ColPerm"] == "NATURAL"): + _options["SymmetricMode"] = True + + return _superlu.gstrf(N, A.nnz, A.data, indices, indptr, + csc_construct_func=csc_construct_func, + ilu=True, options=_options) + + +def factorized(A): + """ + Return a function for solving a sparse linear system, with A pre-factorized. + + Parameters + ---------- + A : (N, N) array_like + Input. 
A in CSC format is most efficient. A CSR format matrix will + be converted to CSC before factorization. + + Returns + ------- + solve : callable + To solve the linear system of equations given in `A`, the `solve` + callable should be passed an ndarray of shape (N,). + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse.linalg import factorized + >>> from scipy.sparse import csc_matrix + >>> A = np.array([[ 3. , 2. , -1. ], + ... [ 2. , -2. , 4. ], + ... [-1. , 0.5, -1. ]]) + >>> solve = factorized(csc_matrix(A)) # Makes LU decomposition. + >>> rhs1 = np.array([1, -2, 0]) + >>> solve(rhs1) # Uses the LU factors. + array([ 1., -2., -2.]) + + """ + if is_pydata_spmatrix(A): + A = A.to_scipy_sparse().tocsc() + + if useUmfpack: + if noScikit: + raise RuntimeError('Scikits.umfpack not installed.') + + if not (issparse(A) and A.format == "csc"): + A = csc_matrix(A) + warn('splu converted its input to CSC format', + SparseEfficiencyWarning, stacklevel=2) + + A = A._asfptype() # upcast to a floating point format + + if A.dtype.char not in 'dD': + raise ValueError("convert matrix data to double, please, using" + " .astype(), or set linsolve.useUmfpack = False") + + umf_family, A = _get_umf_family(A) + umf = umfpack.UmfpackContext(umf_family) + + # Make LU decomposition. + umf.numeric(A) + + def solve(b): + with np.errstate(divide="ignore", invalid="ignore"): + # Ignoring warnings with numpy >= 1.23.0, see gh-16523 + result = umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True) + + return result + + return solve + else: + return splu(A).solve + + +def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False, + unit_diagonal=False): + """ + Solve the equation ``A x = b`` for `x`, assuming A is a triangular matrix. + + Parameters + ---------- + A : (M, M) sparse matrix + A sparse square triangular matrix. Should be in CSR format. + b : (M,) or (M, N) array_like + Right-hand side matrix in ``A x = b`` + lower : bool, optional + Whether `A` is a lower or upper triangular matrix. + Default is lower triangular matrix. + overwrite_A : bool, optional + Allow changing `A`. The indices of `A` are going to be sorted and zero + entries are going to be removed. + Enabling gives a performance gain. Default is False. + overwrite_b : bool, optional + Allow overwriting data in `b`. + Enabling gives a performance gain. Default is False. + If `overwrite_b` is True, it should be ensured that + `b` has an appropriate dtype to be able to store the result. + unit_diagonal : bool, optional + If True, diagonal elements of `a` are assumed to be 1 and will not be + referenced. + + .. versionadded:: 1.4.0 + + Returns + ------- + x : (M,) or (M, N) ndarray + Solution to the system ``A x = b``. Shape of return matches shape + of `b`. + + Raises + ------ + LinAlgError + If `A` is singular or not triangular. + ValueError + If shape of `A` or shape of `b` do not match the requirements. + + Notes + ----- + .. versionadded:: 0.19.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import csr_matrix + >>> from scipy.sparse.linalg import spsolve_triangular + >>> A = csr_matrix([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float) + >>> B = np.array([[2, 0], [-1, 0], [2, 0]], dtype=float) + >>> x = spsolve_triangular(A, B) + >>> np.allclose(A.dot(x), B) + True + """ + + if is_pydata_spmatrix(A): + A = A.to_scipy_sparse().tocsr() + + # Check the input for correct type and format. + if not (issparse(A) and A.format == "csr"): + warn('CSR matrix format is required. 
Converting to CSR matrix.', + SparseEfficiencyWarning, stacklevel=2) + A = csr_matrix(A) + elif not overwrite_A: + A = A.copy() + + if A.shape[0] != A.shape[1]: + raise ValueError( + f'A must be a square matrix but its shape is {A.shape}.') + + # sum duplicates for non-canonical format + A.sum_duplicates() + + b = np.asanyarray(b) + + if b.ndim not in [1, 2]: + raise ValueError( + f'b must have 1 or 2 dims but its shape is {b.shape}.') + if A.shape[0] != b.shape[0]: + raise ValueError( + 'The size of the dimensions of A must be equal to ' + 'the size of the first dimension of b but the shape of A is ' + f'{A.shape} and the shape of b is {b.shape}.' + ) + + # Init x as (a copy of) b. + x_dtype = np.result_type(A.data, b, np.float64) + if overwrite_b: + if np.can_cast(b.dtype, x_dtype, casting='same_kind'): + x = b + else: + raise ValueError( + f'Cannot overwrite b (dtype {b.dtype}) with result ' + f'of type {x_dtype}.' + ) + else: + x = b.astype(x_dtype, copy=True) + + # Choose forward or backward order. + if lower: + row_indices = range(len(b)) + else: + row_indices = range(len(b) - 1, -1, -1) + + # Fill x iteratively. + for i in row_indices: + + # Get indices for i-th row. + indptr_start = A.indptr[i] + indptr_stop = A.indptr[i + 1] + + if lower: + A_diagonal_index_row_i = indptr_stop - 1 + A_off_diagonal_indices_row_i = slice(indptr_start, indptr_stop - 1) + else: + A_diagonal_index_row_i = indptr_start + A_off_diagonal_indices_row_i = slice(indptr_start + 1, indptr_stop) + + # Check regularity and triangularity of A. + if not unit_diagonal and (indptr_stop <= indptr_start + or A.indices[A_diagonal_index_row_i] < i): + raise LinAlgError( + f'A is singular: diagonal {i} is zero.') + if not unit_diagonal and A.indices[A_diagonal_index_row_i] > i: + raise LinAlgError( + 'A is not triangular: A[{}, {}] is nonzero.' + ''.format(i, A.indices[A_diagonal_index_row_i])) + + # Incorporate off-diagonal entries. + A_column_indices_in_row_i = A.indices[A_off_diagonal_indices_row_i] + A_values_in_row_i = A.data[A_off_diagonal_indices_row_i] + x[i] -= np.dot(x[A_column_indices_in_row_i].T, A_values_in_row_i) + + # Compute i-th entry of x. 
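+        # What the division below completes is the standard substitution
+        # update: x[i] currently holds b[i] minus the row-i off-diagonal
+        # contributions, so
+        #   x[i] = (b[i] - sum over j != i of A[i, j] * x[j]) / A[i, i]
+        # and the division is skipped for unit_diagonal=True, where
+        # A[i, i] == 1 is assumed and never read.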
+ if not unit_diagonal: + x[i] /= A.data[A_diagonal_index_row_i] + + return x diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__init__.py b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e45657086b12a47c878338868a4d0cbe3c1a7124 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/test_linsolve.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/test_linsolve.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4e5d3660a656407815ccb0d4c72f6fb8834fe37 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/test_linsolve.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py new file mode 100644 index 0000000000000000000000000000000000000000..f1684b562ff20812a4280e3bdbac2f56086c5b1d --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py @@ -0,0 +1,805 @@ +import sys +import threading + +import numpy as np +from numpy import array, finfo, arange, eye, all, unique, ones, dot +import numpy.random as random +from numpy.testing import ( + assert_array_almost_equal, assert_almost_equal, + assert_equal, assert_array_equal, assert_, assert_allclose, + assert_warns, suppress_warnings) +import pytest +from pytest import raises as assert_raises + +import scipy.linalg +from scipy.linalg import norm, inv +from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix, + csr_matrix, identity, issparse, dok_matrix, lil_matrix, bsr_matrix) +from scipy.sparse.linalg import SuperLU +from scipy.sparse.linalg._dsolve import (spsolve, use_solver, splu, spilu, + MatrixRankWarning, _superlu, spsolve_triangular, factorized) +import scipy.sparse + +from scipy._lib._testutils import check_free_memory +from scipy._lib._util import ComplexWarning + + +sup_sparse_efficiency = suppress_warnings() +sup_sparse_efficiency.filter(SparseEfficiencyWarning) + +# scikits.umfpack is not a SciPy dependency but it is optionally used in +# dsolve, so check whether it's available +try: + import scikits.umfpack as umfpack + has_umfpack = True +except ImportError: + has_umfpack = False + +def toarray(a): + if issparse(a): + return a.toarray() + else: + return a + + +def setup_bug_8278(): + N = 2 ** 6 + h = 1/N + Ah1D = scipy.sparse.diags([-1, 2, -1], [-1, 0, 1], + shape=(N-1, N-1))/(h**2) + eyeN = scipy.sparse.eye(N - 1) + A = (scipy.sparse.kron(eyeN, scipy.sparse.kron(eyeN, Ah1D)) + + scipy.sparse.kron(eyeN, scipy.sparse.kron(Ah1D, eyeN)) + + scipy.sparse.kron(Ah1D, scipy.sparse.kron(eyeN, eyeN))) + b = np.random.rand((N-1)**3) + return A, b + + +class TestFactorized: + def setup_method(self): + n = 5 + d = arange(n) + 1 + self.n = n + self.A = spdiags((d, 2*d, 
d[::-1]), (-3, 0, 5), n, n).tocsc() + random.seed(1234) + + def _check_singular(self): + A = csc_matrix((5,5), dtype='d') + b = ones(5) + assert_array_almost_equal(0. * b, factorized(A)(b)) + + def _check_non_singular(self): + # Make a diagonal dominant, to make sure it is not singular + n = 5 + a = csc_matrix(random.rand(n, n)) + b = ones(n) + + expected = splu(a).solve(b) + assert_array_almost_equal(factorized(a)(b), expected) + + def test_singular_without_umfpack(self): + use_solver(useUmfpack=False) + with assert_raises(RuntimeError, match="Factor is exactly singular"): + self._check_singular() + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_singular_with_umfpack(self): + use_solver(useUmfpack=True) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars") + assert_warns(umfpack.UmfpackWarning, self._check_singular) + + def test_non_singular_without_umfpack(self): + use_solver(useUmfpack=False) + self._check_non_singular() + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_non_singular_with_umfpack(self): + use_solver(useUmfpack=True) + self._check_non_singular() + + def test_cannot_factorize_nonsquare_matrix_without_umfpack(self): + use_solver(useUmfpack=False) + msg = "can only factor square matrices" + with assert_raises(ValueError, match=msg): + factorized(self.A[:, :4]) + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_factorizes_nonsquare_matrix_with_umfpack(self): + use_solver(useUmfpack=True) + # does not raise + factorized(self.A[:,:4]) + + def test_call_with_incorrectly_sized_matrix_without_umfpack(self): + use_solver(useUmfpack=False) + solve = factorized(self.A) + b = random.rand(4) + B = random.rand(4, 3) + BB = random.rand(self.n, 3, 9) + + with assert_raises(ValueError, match="is of incompatible size"): + solve(b) + with assert_raises(ValueError, match="is of incompatible size"): + solve(B) + with assert_raises(ValueError, + match="object too deep for desired array"): + solve(BB) + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_call_with_incorrectly_sized_matrix_with_umfpack(self): + use_solver(useUmfpack=True) + solve = factorized(self.A) + b = random.rand(4) + B = random.rand(4, 3) + BB = random.rand(self.n, 3, 9) + + # does not raise + solve(b) + msg = "object too deep for desired array" + with assert_raises(ValueError, match=msg): + solve(B) + with assert_raises(ValueError, match=msg): + solve(BB) + + def test_call_with_cast_to_complex_without_umfpack(self): + use_solver(useUmfpack=False) + solve = factorized(self.A) + b = random.rand(4) + for t in [np.complex64, np.complex128]: + with assert_raises(TypeError, match="Cannot cast array data"): + solve(b.astype(t)) + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_call_with_cast_to_complex_with_umfpack(self): + use_solver(useUmfpack=True) + solve = factorized(self.A) + b = random.rand(4) + for t in [np.complex64, np.complex128]: + assert_warns(ComplexWarning, solve, b.astype(t)) + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_assume_sorted_indices_flag(self): + # a sparse matrix with unsorted indices + unsorted_inds = np.array([2, 0, 1, 0]) + data = np.array([10, 16, 5, 0.4]) + indptr = np.array([0, 1, 2, 4]) + A = csc_matrix((data, unsorted_inds, indptr), (3, 3)) + b = ones(3) + + # should raise when incorrectly assuming indices are sorted + 
use_solver(useUmfpack=True, assumeSortedIndices=True) + with assert_raises(RuntimeError, + match="UMFPACK_ERROR_invalid_matrix"): + factorized(A) + + # should sort indices and succeed when not assuming indices are sorted + use_solver(useUmfpack=True, assumeSortedIndices=False) + expected = splu(A.copy()).solve(b) + + assert_equal(A.has_sorted_indices, 0) + assert_array_almost_equal(factorized(A)(b), expected) + + @pytest.mark.slow + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_bug_8278(self): + check_free_memory(8000) + use_solver(useUmfpack=True) + A, b = setup_bug_8278() + A = A.tocsc() + f = factorized(A) + x = f(b) + assert_array_almost_equal(A @ x, b) + + +class TestLinsolve: + def setup_method(self): + use_solver(useUmfpack=False) + + def test_singular(self): + A = csc_matrix((5,5), dtype='d') + b = array([1, 2, 3, 4, 5],dtype='d') + with suppress_warnings() as sup: + sup.filter(MatrixRankWarning, "Matrix is exactly singular") + x = spsolve(A, b) + assert_(not np.isfinite(x).any()) + + def test_singular_gh_3312(self): + # "Bad" test case that leads SuperLU to call LAPACK with invalid + # arguments. Check that it fails moderately gracefully. + ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32) + v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296]) + A = csc_matrix((v, ij.T), shape=(20, 20)) + b = np.arange(20) + + try: + # should either raise a runtime error or return value + # appropriate for singular input (which yields the warning) + with suppress_warnings() as sup: + sup.filter(MatrixRankWarning, "Matrix is exactly singular") + x = spsolve(A, b) + assert not np.isfinite(x).any() + except RuntimeError: + pass + + @pytest.mark.parametrize('format', ['csc', 'csr']) + @pytest.mark.parametrize('idx_dtype', [np.int32, np.int64]) + def test_twodiags(self, format: str, idx_dtype: np.dtype): + A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5, + format=format) + b = array([1, 2, 3, 4, 5]) + + # condition number of A + cond_A = norm(A.toarray(), 2) * norm(inv(A.toarray()), 2) + + for t in ['f','d','F','D']: + eps = finfo(t).eps # floating point epsilon + b = b.astype(t) + Asp = A.astype(t) + Asp.indices = Asp.indices.astype(idx_dtype, copy=False) + Asp.indptr = Asp.indptr.astype(idx_dtype, copy=False) + + x = spsolve(Asp, b) + assert_(norm(b - Asp@x) < 10 * cond_A * eps) + + def test_bvector_smoketest(self): + Adense = array([[0., 1., 1.], + [1., 0., 1.], + [0., 0., 1.]]) + As = csc_matrix(Adense) + random.seed(1234) + x = random.randn(3) + b = As@x + x2 = spsolve(As, b) + + assert_array_almost_equal(x, x2) + + def test_bmatrix_smoketest(self): + Adense = array([[0., 1., 1.], + [1., 0., 1.], + [0., 0., 1.]]) + As = csc_matrix(Adense) + random.seed(1234) + x = random.randn(3, 4) + Bdense = As.dot(x) + Bs = csc_matrix(Bdense) + x2 = spsolve(As, Bs) + assert_array_almost_equal(x, x2.toarray()) + + @sup_sparse_efficiency + def test_non_square(self): + # A is not square. + A = ones((3, 4)) + b = ones((4, 1)) + assert_raises(ValueError, spsolve, A, b) + # A2 and b2 have incompatible shapes. 
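+        # eye(3) makes A2 square, so this pair exercises spsolve's
+        # "matrix - rhs dimension mismatch" check rather than the
+        # squareness check hit by the case above.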
+ A2 = csc_matrix(eye(3)) + b2 = array([1.0, 2.0]) + assert_raises(ValueError, spsolve, A2, b2) + + @sup_sparse_efficiency + def test_example_comparison(self): + row = array([0,0,1,2,2,2]) + col = array([0,2,2,0,1,2]) + data = array([1,2,3,-4,5,6]) + sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float) + M = sM.toarray() + + row = array([0,0,1,1,0,0]) + col = array([0,2,1,1,0,0]) + data = array([1,1,1,1,1,1]) + sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float) + N = sN.toarray() + + sX = spsolve(sM, sN) + X = scipy.linalg.solve(M, N) + + assert_array_almost_equal(X, sX.toarray()) + + @sup_sparse_efficiency + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_shape_compatibility(self): + use_solver(useUmfpack=True) + A = csc_matrix([[1., 0], [0, 2]]) + bs = [ + [1, 6], + array([1, 6]), + [[1], [6]], + array([[1], [6]]), + csc_matrix([[1], [6]]), + csr_matrix([[1], [6]]), + dok_matrix([[1], [6]]), + bsr_matrix([[1], [6]]), + array([[1., 2., 3.], [6., 8., 10.]]), + csc_matrix([[1., 2., 3.], [6., 8., 10.]]), + csr_matrix([[1., 2., 3.], [6., 8., 10.]]), + dok_matrix([[1., 2., 3.], [6., 8., 10.]]), + bsr_matrix([[1., 2., 3.], [6., 8., 10.]]), + ] + + for b in bs: + x = np.linalg.solve(A.toarray(), toarray(b)) + for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]: + x1 = spsolve(spmattype(A), b, use_umfpack=True) + x2 = spsolve(spmattype(A), b, use_umfpack=False) + + # check solution + if x.ndim == 2 and x.shape[1] == 1: + # interprets also these as "vectors" + x = x.ravel() + + assert_array_almost_equal(toarray(x1), x, + err_msg=repr((b, spmattype, 1))) + assert_array_almost_equal(toarray(x2), x, + err_msg=repr((b, spmattype, 2))) + + # dense vs. sparse output ("vectors" are always dense) + if issparse(b) and x.ndim > 1: + assert_(issparse(x1), repr((b, spmattype, 1))) + assert_(issparse(x2), repr((b, spmattype, 2))) + else: + assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1))) + assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2))) + + # check output shape + if x.ndim == 1: + # "vector" + assert_equal(x1.shape, (A.shape[1],)) + assert_equal(x2.shape, (A.shape[1],)) + else: + # "matrix" + assert_equal(x1.shape, x.shape) + assert_equal(x2.shape, x.shape) + + A = csc_matrix((3, 3)) + b = csc_matrix((1, 3)) + assert_raises(ValueError, spsolve, A, b) + + @sup_sparse_efficiency + def test_ndarray_support(self): + A = array([[1., 2.], [2., 0.]]) + x = array([[1., 1.], [0.5, -0.5]]) + b = array([[2., 0.], [2., 2.]]) + + assert_array_almost_equal(x, spsolve(A, b)) + + def test_gssv_badinput(self): + N = 10 + d = arange(N) + 1.0 + A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N) + + for spmatrix in (csc_matrix, csr_matrix): + A = spmatrix(A) + b = np.arange(N) + + def not_c_contig(x): + return x.repeat(2)[::2] + + def not_1dim(x): + return x[:,None] + + def bad_type(x): + return x.astype(bool) + + def too_short(x): + return x[:-1] + + badops = [not_c_contig, not_1dim, bad_type, too_short] + + for badop in badops: + msg = f"{spmatrix!r} {badop!r}" + # Not C-contiguous + assert_raises((ValueError, TypeError), _superlu.gssv, + N, A.nnz, badop(A.data), A.indices, A.indptr, + b, int(spmatrix == csc_matrix), err_msg=msg) + assert_raises((ValueError, TypeError), _superlu.gssv, + N, A.nnz, A.data, badop(A.indices), A.indptr, + b, int(spmatrix == csc_matrix), err_msg=msg) + assert_raises((ValueError, TypeError), _superlu.gssv, + N, A.nnz, A.data, A.indices, badop(A.indptr), + b, int(spmatrix == csc_matrix), err_msg=msg) + + def 
test_sparsity_preservation(self): + ident = csc_matrix([ + [1, 0, 0], + [0, 1, 0], + [0, 0, 1]]) + b = csc_matrix([ + [0, 1], + [1, 0], + [0, 0]]) + x = spsolve(ident, b) + assert_equal(ident.nnz, 3) + assert_equal(b.nnz, 2) + assert_equal(x.nnz, 2) + assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12) + + def test_dtype_cast(self): + A_real = scipy.sparse.csr_matrix([[1, 2, 0], + [0, 0, 3], + [4, 0, 5]]) + A_complex = scipy.sparse.csr_matrix([[1, 2, 0], + [0, 0, 3], + [4, 0, 5 + 1j]]) + b_real = np.array([1,1,1]) + b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1]) + x = spsolve(A_real, b_real) + assert_(np.issubdtype(x.dtype, np.floating)) + x = spsolve(A_real, b_complex) + assert_(np.issubdtype(x.dtype, np.complexfloating)) + x = spsolve(A_complex, b_real) + assert_(np.issubdtype(x.dtype, np.complexfloating)) + x = spsolve(A_complex, b_complex) + assert_(np.issubdtype(x.dtype, np.complexfloating)) + + @pytest.mark.slow + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_bug_8278(self): + check_free_memory(8000) + use_solver(useUmfpack=True) + A, b = setup_bug_8278() + x = spsolve(A, b) + assert_array_almost_equal(A @ x, b) + + +class TestSplu: + def setup_method(self): + use_solver(useUmfpack=False) + n = 40 + d = arange(n) + 1 + self.n = n + self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n, format='csc') + random.seed(1234) + + def _smoketest(self, spxlu, check, dtype, idx_dtype): + if np.issubdtype(dtype, np.complexfloating): + A = self.A + 1j*self.A.T + else: + A = self.A + + A = A.astype(dtype) + A.indices = A.indices.astype(idx_dtype, copy=False) + A.indptr = A.indptr.astype(idx_dtype, copy=False) + lu = spxlu(A) + + rng = random.RandomState(1234) + + # Input shapes + for k in [None, 1, 2, self.n, self.n+2]: + msg = f"k={k!r}" + + if k is None: + b = rng.rand(self.n) + else: + b = rng.rand(self.n, k) + + if np.issubdtype(dtype, np.complexfloating): + b = b + 1j*rng.rand(*b.shape) + b = b.astype(dtype) + + x = lu.solve(b) + check(A, b, x, msg) + + x = lu.solve(b, 'T') + check(A.T, b, x, msg) + + x = lu.solve(b, 'H') + check(A.T.conj(), b, x, msg) + + @sup_sparse_efficiency + def test_splu_smoketest(self): + self._internal_test_splu_smoketest() + + def _internal_test_splu_smoketest(self): + # Check that splu works at all + def check(A, b, x, msg=""): + eps = np.finfo(A.dtype).eps + r = A @ x + assert_(abs(r - b).max() < 1e3*eps, msg) + + for dtype in [np.float32, np.float64, np.complex64, np.complex128]: + for idx_dtype in [np.int32, np.int64]: + self._smoketest(splu, check, dtype, idx_dtype) + + @sup_sparse_efficiency + def test_spilu_smoketest(self): + self._internal_test_spilu_smoketest() + + def _internal_test_spilu_smoketest(self): + errors = [] + + def check(A, b, x, msg=""): + r = A @ x + err = abs(r - b).max() + assert_(err < 1e-2, msg) + if b.dtype in (np.float64, np.complex128): + errors.append(err) + + for dtype in [np.float32, np.float64, np.complex64, np.complex128]: + for idx_dtype in [np.int32, np.int64]: + self._smoketest(spilu, check, dtype, idx_dtype) + + assert_(max(errors) > 1e-5) + + @sup_sparse_efficiency + def test_spilu_drop_rule(self): + # Test passing in the drop_rule argument to spilu. 
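+        # SuperLU parses drop_rule as a comma-separated list of ILU
+        # dropping strategies, and the wrapper tolerates str, bytes, and
+        # mixed sequences, as exercised below. A doctest-style sketch with
+        # assumed values:
+        #   >>> lu = spilu(A, drop_rule='basic,area', drop_tol=1e-4)
+        #   >>> isinstance(lu, SuperLU)
+        #   True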
+ A = identity(2) + + rules = [ + b'basic,area'.decode('ascii'), # unicode + b'basic,area', # ascii + [b'basic', b'area'.decode('ascii')] + ] + for rule in rules: + # Argument should be accepted + assert_(isinstance(spilu(A, drop_rule=rule), SuperLU)) + + def test_splu_nnz0(self): + A = csc_matrix((5,5), dtype='d') + assert_raises(RuntimeError, splu, A) + + def test_spilu_nnz0(self): + A = csc_matrix((5,5), dtype='d') + assert_raises(RuntimeError, spilu, A) + + def test_splu_basic(self): + # Test basic splu functionality. + n = 30 + rng = random.RandomState(12) + a = rng.rand(n, n) + a[a < 0.95] = 0 + # First test with a singular matrix + a[:, 0] = 0 + a_ = csc_matrix(a) + # Matrix is exactly singular + assert_raises(RuntimeError, splu, a_) + + # Make a diagonal dominant, to make sure it is not singular + a += 4*eye(n) + a_ = csc_matrix(a) + lu = splu(a_) + b = ones(n) + x = lu.solve(b) + assert_almost_equal(dot(a, x), b) + + def test_splu_perm(self): + # Test the permutation vectors exposed by splu. + n = 30 + a = random.random((n, n)) + a[a < 0.95] = 0 + # Make a diagonal dominant, to make sure it is not singular + a += 4*eye(n) + a_ = csc_matrix(a) + lu = splu(a_) + # Check that the permutation indices do belong to [0, n-1]. + for perm in (lu.perm_r, lu.perm_c): + assert_(all(perm > -1)) + assert_(all(perm < n)) + assert_equal(len(unique(perm)), len(perm)) + + # Now make a symmetric, and test that the two permutation vectors are + # the same + # Note: a += a.T relies on undefined behavior. + a = a + a.T + a_ = csc_matrix(a) + lu = splu(a_) + assert_array_equal(lu.perm_r, lu.perm_c) + + @pytest.mark.parametrize("splu_fun, rtol", [(splu, 1e-7), (spilu, 1e-1)]) + def test_natural_permc(self, splu_fun, rtol): + # Test that the "NATURAL" permc_spec does not permute the matrix + np.random.seed(42) + n = 500 + p = 0.01 + A = scipy.sparse.random(n, n, p) + x = np.random.rand(n) + # Make A diagonal dominant to make sure it is not singular + A += (n+1)*scipy.sparse.identity(n) + A_ = csc_matrix(A) + b = A_ @ x + + # without permc_spec, permutation is not identity + lu = splu_fun(A_) + assert_(np.any(lu.perm_c != np.arange(n))) + + # with permc_spec="NATURAL", permutation is identity + lu = splu_fun(A_, permc_spec="NATURAL") + assert_array_equal(lu.perm_c, np.arange(n)) + + # Also, lu decomposition is valid + x2 = lu.solve(b) + assert_allclose(x, x2, rtol=rtol) + + @pytest.mark.skipif(not hasattr(sys, 'getrefcount'), reason="no sys.getrefcount") + def test_lu_refcount(self): + # Test that we are keeping track of the reference count with splu. 
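+        # sys.getrefcount counts the temporary reference created by the
+        # call itself, so the checks below look for the count on `lu`
+        # rising by exactly one while a permutation array keeps a
+        # reference to it alive.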
+ n = 30 + a = random.random((n, n)) + a[a < 0.95] = 0 + # Make a diagonal dominant, to make sure it is not singular + a += 4*eye(n) + a_ = csc_matrix(a) + lu = splu(a_) + + # And now test that we don't have a refcount bug + rc = sys.getrefcount(lu) + for attr in ('perm_r', 'perm_c'): + perm = getattr(lu, attr) + assert_equal(sys.getrefcount(lu), rc + 1) + del perm + assert_equal(sys.getrefcount(lu), rc) + + def test_bad_inputs(self): + A = self.A.tocsc() + + assert_raises(ValueError, splu, A[:,:4]) + assert_raises(ValueError, spilu, A[:,:4]) + + for lu in [splu(A), spilu(A)]: + b = random.rand(42) + B = random.rand(42, 3) + BB = random.rand(self.n, 3, 9) + assert_raises(ValueError, lu.solve, b) + assert_raises(ValueError, lu.solve, B) + assert_raises(ValueError, lu.solve, BB) + assert_raises(TypeError, lu.solve, + b.astype(np.complex64)) + assert_raises(TypeError, lu.solve, + b.astype(np.complex128)) + + @sup_sparse_efficiency + def test_superlu_dlamch_i386_nan(self): + # SuperLU 4.3 calls some functions returning floats without + # declaring them. On i386@linux call convention, this fails to + # clear floating point registers after call. As a result, NaN + # can appear in the next floating point operation made. + # + # Here's a test case that triggered the issue. + n = 8 + d = np.arange(n) + 1 + A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n) + A = A.astype(np.float32) + spilu(A) + A = A + 1j*A + B = A.A + assert_(not np.isnan(B).any()) + + @sup_sparse_efficiency + def test_lu_attr(self): + + def check(dtype, complex_2=False): + A = self.A.astype(dtype) + + if complex_2: + A = A + 1j*A.T + + n = A.shape[0] + lu = splu(A) + + # Check that the decomposition is as advertised + + Pc = np.zeros((n, n)) + Pc[np.arange(n), lu.perm_c] = 1 + + Pr = np.zeros((n, n)) + Pr[lu.perm_r, np.arange(n)] = 1 + + Ad = A.toarray() + lhs = Pr.dot(Ad).dot(Pc) + rhs = (lu.L @ lu.U).toarray() + + eps = np.finfo(dtype).eps + + assert_allclose(lhs, rhs, atol=100*eps) + + check(np.float32) + check(np.float64) + check(np.complex64) + check(np.complex128) + check(np.complex64, True) + check(np.complex128, True) + + @pytest.mark.slow + @sup_sparse_efficiency + def test_threads_parallel(self): + oks = [] + + def worker(): + try: + self.test_splu_basic() + self._internal_test_splu_smoketest() + self._internal_test_spilu_smoketest() + oks.append(True) + except Exception: + pass + + threads = [threading.Thread(target=worker) + for k in range(20)] + for t in threads: + t.start() + for t in threads: + t.join() + + assert_equal(len(oks), 20) + + +class TestSpsolveTriangular: + def setup_method(self): + use_solver(useUmfpack=False) + + def test_zero_diagonal(self): + n = 5 + rng = np.random.default_rng(43876432987) + A = rng.standard_normal((n, n)) + b = np.arange(n) + A = scipy.sparse.tril(A, k=0, format='csr') + + x = spsolve_triangular(A, b, unit_diagonal=True, lower=True) + + A.setdiag(1) + assert_allclose(A.dot(x), b) + + # Regression test from gh-15199 + A = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]], dtype=np.float64) + b = np.array([1., 2., 3.]) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, "CSR matrix format is") + spsolve_triangular(A, b, unit_diagonal=True) + + def test_singular(self): + n = 5 + A = csr_matrix((n, n)) + b = np.arange(n) + for lower in (True, False): + assert_raises(scipy.linalg.LinAlgError, + spsolve_triangular, A, b, lower=lower) + + @sup_sparse_efficiency + def test_bad_shape(self): + # A is not square. 
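+        # The 3x4 zeros array is rejected by spsolve_triangular's
+        # squareness check before any substitution is attempted.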
+ A = np.zeros((3, 4)) + b = ones((4, 1)) + assert_raises(ValueError, spsolve_triangular, A, b) + # A2 and b2 have incompatible shapes. + A2 = csr_matrix(eye(3)) + b2 = array([1.0, 2.0]) + assert_raises(ValueError, spsolve_triangular, A2, b2) + + @sup_sparse_efficiency + def test_input_types(self): + A = array([[1., 0.], [1., 2.]]) + b = array([[2., 0.], [2., 2.]]) + for matrix_type in (array, csc_matrix, csr_matrix): + x = spsolve_triangular(matrix_type(A), b, lower=True) + assert_array_almost_equal(A.dot(x), b) + + @pytest.mark.slow + @pytest.mark.timeout(120) # prerelease_deps_coverage_64bit_blas job + @sup_sparse_efficiency + def test_random(self): + def random_triangle_matrix(n, lower=True): + A = scipy.sparse.random(n, n, density=0.1, format='coo') + if lower: + A = scipy.sparse.tril(A) + else: + A = scipy.sparse.triu(A) + A = A.tocsr(copy=False) + for i in range(n): + A[i, i] = np.random.rand() + 1 + return A + + np.random.seed(1234) + for lower in (True, False): + for n in (10, 10**2, 10**3): + A = random_triangle_matrix(n, lower=lower) + for m in (1, 10): + for b in (np.random.rand(n, m), + np.random.randint(-9, 9, (n, m)), + np.random.randint(-9, 9, (n, m)) + + np.random.randint(-9, 9, (n, m)) * 1j): + x = spsolve_triangular(A, b, lower=lower) + assert_array_almost_equal(A.dot(x), b) + x = spsolve_triangular(A, b, lower=lower, + unit_diagonal=True) + A.setdiag(1) + assert_array_almost_equal(A.dot(x), b) diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af23d6f96ba6f85941088525c8abc2159f227bf8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/COPYING b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/COPYING new file mode 100644 index 0000000000000000000000000000000000000000..e87667e1b8c178e53c6a7c6268ebc09ab4b0476c --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/COPYING @@ -0,0 +1,45 @@ + +BSD Software License + +Pertains to ARPACK and P_ARPACK + +Copyright (c) 1996-2008 Rice University. +Developed by D.C. Sorensen, R.B. Lehoucq, C. Yang, and K. Maschhoff. +All rights reserved. + +Arpack has been renamed to arpack-ng. + +Copyright (c) 2001-2011 - Scilab Enterprises +Updated by Allan Cornet, Sylvestre Ledru. + +Copyright (c) 2010 - Jordi Gutiérrez Hermoso (Octave patch) + +Copyright (c) 2007 - Sébastien Fabbro (gentoo patch) + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +- Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + +- Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__init__.py b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..679b94480d7ff5a11e037ffb758f2214c6e5097f --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__init__.py @@ -0,0 +1,20 @@ +""" +Eigenvalue solver using iterative methods. + +Find k eigenvectors and eigenvalues of a matrix A using the +Arnoldi/Lanczos iterative methods from ARPACK [1]_,[2]_. + +These methods are most useful for large sparse matrices. + + - eigs(A,k) + - eigsh(A,k) + +References +---------- +.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ +.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: + Solution of Large Scale Eigenvalue Problems by Implicitly Restarted + Arnoldi Methods. SIAM, Philadelphia, PA, 1998. + +""" +from .arpack import * diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d733c3bf5a97034476c30b17276dc2497837f3ec Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/arpack.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/arpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16d7f89871b6f91c8b15ce0b07e0dbfb95c64e90 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/arpack.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..2037f64d51527318b00afd5bd9c3fc4552a75721 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/arpack.py b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/arpack.py new file mode 100644 index 0000000000000000000000000000000000000000..f7a6fa218ca462212f1ed8a2fc0bd0037cf9d44b --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/arpack.py 
@@ -0,0 +1,1702 @@ +""" +Find a few eigenvectors and eigenvalues of a matrix. + + +Uses ARPACK: https://github.com/opencollab/arpack-ng + +""" +# Wrapper implementation notes +# +# ARPACK Entry Points +# ------------------- +# The entry points to ARPACK are +# - (s,d)seupd : single and double precision symmetric matrix +# - (s,d,c,z)neupd: single,double,complex,double complex general matrix +# This wrapper puts the *neupd (general matrix) interfaces in eigs() +# and the *seupd (symmetric matrix) in eigsh(). +# There is no specialized interface for complex Hermitian matrices. +# To find eigenvalues of a complex Hermitian matrix you +# may use eigsh(), but eigsh() will simply call eigs() +# and return the real part of the eigenvalues thus obtained. + +# Number of eigenvalues returned and complex eigenvalues +# ------------------------------------------------------ +# The ARPACK nonsymmetric real and double interface (s,d)naupd return +# eigenvalues and eigenvectors in real (float,double) arrays. +# Since the eigenvalues and eigenvectors are, in general, complex +# ARPACK puts the real and imaginary parts in consecutive entries +# in real-valued arrays. This wrapper puts the real entries +# into complex data types and attempts to return the requested eigenvalues +# and eigenvectors. + + +# Solver modes +# ------------ +# ARPACK and handle shifted and shift-inverse computations +# for eigenvalues by providing a shift (sigma) and a solver. + +import numpy as np +import warnings +from scipy.sparse.linalg._interface import aslinearoperator, LinearOperator +from scipy.sparse import eye, issparse +from scipy.linalg import eig, eigh, lu_factor, lu_solve +from scipy.sparse._sputils import isdense, is_pydata_spmatrix +from scipy.sparse.linalg import gmres, splu +from scipy._lib._util import _aligned_zeros +from scipy._lib._threadsafety import ReentrancyLock + +from . import _arpack +arpack_int = _arpack.timing.nbx.dtype + +__docformat__ = "restructuredtext en" + +__all__ = ['eigs', 'eigsh', 'ArpackError', 'ArpackNoConvergence'] + + +_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'} +_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12} + +DNAUPD_ERRORS = { + 0: "Normal exit.", + 1: "Maximum number of iterations taken. " + "All possible eigenvalues of OP has been found. IPARAM(5) " + "returns the number of wanted converged Ritz values.", + 2: "No longer an informational error. Deprecated starting " + "with release 2 of ARPACK.", + 3: "No shifts could be applied during a cycle of the " + "Implicitly restarted Arnoldi iteration. One possibility " + "is to increase the size of NCV relative to NEV. ", + -1: "N must be positive.", + -2: "NEV must be positive.", + -3: "NCV-NEV >= 2 and less than or equal to N.", + -4: "The maximum number of Arnoldi update iterations allowed " + "must be greater than zero.", + -5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", + -6: "BMAT must be one of 'I' or 'G'.", + -7: "Length of private work array WORKL is not sufficient.", + -8: "Error return from LAPACK eigenvalue calculation;", + -9: "Starting vector is zero.", + -10: "IPARAM(7) must be 1,2,3,4.", + -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", + -12: "IPARAM(1) must be equal to 0 or 1.", + -13: "NEV and WHICH = 'BE' are incompatible.", + -9999: "Could not build an Arnoldi factorization. " + "IPARAM(5) returns the size of the current Arnoldi " + "factorization. The user is advised to check that " + "enough workspace and array storage has been allocated." 
+} + +SNAUPD_ERRORS = DNAUPD_ERRORS + +ZNAUPD_ERRORS = DNAUPD_ERRORS.copy() +ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3." + +CNAUPD_ERRORS = ZNAUPD_ERRORS + +DSAUPD_ERRORS = { + 0: "Normal exit.", + 1: "Maximum number of iterations taken. " + "All possible eigenvalues of OP has been found.", + 2: "No longer an informational error. Deprecated starting with " + "release 2 of ARPACK.", + 3: "No shifts could be applied during a cycle of the Implicitly " + "restarted Arnoldi iteration. One possibility is to increase " + "the size of NCV relative to NEV. ", + -1: "N must be positive.", + -2: "NEV must be positive.", + -3: "NCV must be greater than NEV and less than or equal to N.", + -4: "The maximum number of Arnoldi update iterations allowed " + "must be greater than zero.", + -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.", + -6: "BMAT must be one of 'I' or 'G'.", + -7: "Length of private work array WORKL is not sufficient.", + -8: "Error return from trid. eigenvalue calculation; " + "Informational error from LAPACK routine dsteqr .", + -9: "Starting vector is zero.", + -10: "IPARAM(7) must be 1,2,3,4,5.", + -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", + -12: "IPARAM(1) must be equal to 0 or 1.", + -13: "NEV and WHICH = 'BE' are incompatible. ", + -9999: "Could not build an Arnoldi factorization. " + "IPARAM(5) returns the size of the current Arnoldi " + "factorization. The user is advised to check that " + "enough workspace and array storage has been allocated.", +} + +SSAUPD_ERRORS = DSAUPD_ERRORS + +DNEUPD_ERRORS = { + 0: "Normal exit.", + 1: "The Schur form computed by LAPACK routine dlahqr " + "could not be reordered by LAPACK routine dtrsen. " + "Re-enter subroutine dneupd with IPARAM(5)NCV and " + "increase the size of the arrays DR and DI to have " + "dimension at least dimension NCV and allocate at least NCV " + "columns for Z. NOTE: Not necessary if Z and V share " + "the same space. Please notify the authors if this error" + "occurs.", + -1: "N must be positive.", + -2: "NEV must be positive.", + -3: "NCV-NEV >= 2 and less than or equal to N.", + -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", + -6: "BMAT must be one of 'I' or 'G'.", + -7: "Length of private work WORKL array is not sufficient.", + -8: "Error return from calculation of a real Schur form. " + "Informational error from LAPACK routine dlahqr .", + -9: "Error return from calculation of eigenvectors. " + "Informational error from LAPACK routine dtrevc.", + -10: "IPARAM(7) must be 1,2,3,4.", + -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", + -12: "HOWMNY = 'S' not yet implemented", + -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.", + -14: "DNAUPD did not find any eigenvalues to sufficient " + "accuracy.", + -15: "DNEUPD got a different count of the number of converged " + "Ritz values than DNAUPD got. This indicates the user " + "probably made an error in passing data from DNAUPD to " + "DNEUPD or that the data was modified before entering " + "DNEUPD", +} + +SNEUPD_ERRORS = DNEUPD_ERRORS.copy() +SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr " + "could not be reordered by LAPACK routine strsen . " + "Re-enter subroutine dneupd with IPARAM(5)=NCV and " + "increase the size of the arrays DR and DI to have " + "dimension at least dimension NCV and allocate at least " + "NCV columns for Z. NOTE: Not necessary if Z and V share " + "the same space. 
Please notify the authors if this error " + "occurs.") +SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient " + "accuracy.") +SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of " + "converged Ritz values than SNAUPD got. This indicates " + "the user probably made an error in passing data from " + "SNAUPD to SNEUPD or that the data was modified before " + "entering SNEUPD") + +ZNEUPD_ERRORS = {0: "Normal exit.", + 1: "The Schur form computed by LAPACK routine csheqr " + "could not be reordered by LAPACK routine ztrsen. " + "Re-enter subroutine zneupd with IPARAM(5)=NCV and " + "increase the size of the array D to have " + "dimension at least dimension NCV and allocate at least " + "NCV columns for Z. NOTE: Not necessary if Z and V share " + "the same space. Please notify the authors if this error " + "occurs.", + -1: "N must be positive.", + -2: "NEV must be positive.", + -3: "NCV-NEV >= 1 and less than or equal to N.", + -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", + -6: "BMAT must be one of 'I' or 'G'.", + -7: "Length of private work WORKL array is not sufficient.", + -8: "Error return from LAPACK eigenvalue calculation. " + "This should never happened.", + -9: "Error return from calculation of eigenvectors. " + "Informational error from LAPACK routine ztrevc.", + -10: "IPARAM(7) must be 1,2,3", + -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", + -12: "HOWMNY = 'S' not yet implemented", + -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.", + -14: "ZNAUPD did not find any eigenvalues to sufficient " + "accuracy.", + -15: "ZNEUPD got a different count of the number of " + "converged Ritz values than ZNAUPD got. This " + "indicates the user probably made an error in passing " + "data from ZNAUPD to ZNEUPD or that the data was " + "modified before entering ZNEUPD" + } + +CNEUPD_ERRORS = ZNEUPD_ERRORS.copy() +CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient " + "accuracy.") +CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of " + "converged Ritz values than CNAUPD got. This indicates " + "the user probably made an error in passing data from " + "CNAUPD to CNEUPD or that the data was modified before " + "entering CNEUPD") + +DSEUPD_ERRORS = { + 0: "Normal exit.", + -1: "N must be positive.", + -2: "NEV must be positive.", + -3: "NCV must be greater than NEV and less than or equal to N.", + -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.", + -6: "BMAT must be one of 'I' or 'G'.", + -7: "Length of private work WORKL array is not sufficient.", + -8: ("Error return from trid. eigenvalue calculation; " + "Information error from LAPACK routine dsteqr."), + -9: "Starting vector is zero.", + -10: "IPARAM(7) must be 1,2,3,4,5.", + -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", + -12: "NEV and WHICH = 'BE' are incompatible.", + -14: "DSAUPD did not find any eigenvalues to sufficient accuracy.", + -15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.", + -16: "HOWMNY = 'S' not yet implemented", + -17: ("DSEUPD got a different count of the number of converged " + "Ritz values than DSAUPD got. 
This indicates the user " + "probably made an error in passing data from DSAUPD to " + "DSEUPD or that the data was modified before entering " + "DSEUPD.") +} + +SSEUPD_ERRORS = DSEUPD_ERRORS.copy() +SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues " + "to sufficient accuracy.") +SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of " + "converged " + "Ritz values than SSAUPD got. This indicates the user " + "probably made an error in passing data from SSAUPD to " + "SSEUPD or that the data was modified before entering " + "SSEUPD.") + +_SAUPD_ERRORS = {'d': DSAUPD_ERRORS, + 's': SSAUPD_ERRORS} +_NAUPD_ERRORS = {'d': DNAUPD_ERRORS, + 's': SNAUPD_ERRORS, + 'z': ZNAUPD_ERRORS, + 'c': CNAUPD_ERRORS} +_SEUPD_ERRORS = {'d': DSEUPD_ERRORS, + 's': SSEUPD_ERRORS} +_NEUPD_ERRORS = {'d': DNEUPD_ERRORS, + 's': SNEUPD_ERRORS, + 'z': ZNEUPD_ERRORS, + 'c': CNEUPD_ERRORS} + +# accepted values of parameter WHICH in _SEUPD +_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE'] + +# accepted values of parameter WHICH in _NAUPD +_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI'] + + +class ArpackError(RuntimeError): + """ + ARPACK error + """ + + def __init__(self, info, infodict=_NAUPD_ERRORS): + msg = infodict.get(info, "Unknown error") + RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg)) + + +class ArpackNoConvergence(ArpackError): + """ + ARPACK iteration did not converge + + Attributes + ---------- + eigenvalues : ndarray + Partial result. Converged eigenvalues. + eigenvectors : ndarray + Partial result. Converged eigenvectors. + + """ + + def __init__(self, msg, eigenvalues, eigenvectors): + ArpackError.__init__(self, -1, {-1: msg}) + self.eigenvalues = eigenvalues + self.eigenvectors = eigenvectors + + +def choose_ncv(k): + """ + Choose number of lanczos vectors based on target number + of singular/eigen values and vectors to compute, k. + """ + return max(2 * k + 1, 20) + + +class _ArpackParams: + def __init__(self, n, k, tp, mode=1, sigma=None, + ncv=None, v0=None, maxiter=None, which="LM", tol=0): + if k <= 0: + raise ValueError("k must be positive, k=%d" % k) + + if maxiter is None: + maxiter = n * 10 + if maxiter <= 0: + raise ValueError("maxiter must be positive, maxiter=%d" % maxiter) + + if tp not in 'fdFD': + raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'") + + if v0 is not None: + # ARPACK overwrites its initial resid, make a copy + self.resid = np.array(v0, copy=True) + info = 1 + else: + # ARPACK will use a random initial vector. 
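+            # info=0 asks the ARPACK *aupd routines to generate their own
+            # random starting vector; the zeros allocated here are just
+            # workspace that ARPACK overwrites on the first
+            # reverse-communication call.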
+ self.resid = np.zeros(n, tp) + info = 0 + + if sigma is None: + #sigma not used + self.sigma = 0 + else: + self.sigma = sigma + + if ncv is None: + ncv = choose_ncv(k) + ncv = min(ncv, n) + + self.v = np.zeros((n, ncv), tp) # holds Ritz vectors + self.iparam = np.zeros(11, arpack_int) + + # set solver mode and parameters + ishfts = 1 + self.mode = mode + self.iparam[0] = ishfts + self.iparam[2] = maxiter + self.iparam[3] = 1 + self.iparam[6] = mode + + self.n = n + self.tol = tol + self.k = k + self.maxiter = maxiter + self.ncv = ncv + self.which = which + self.tp = tp + self.info = info + + self.converged = False + self.ido = 0 + + def _raise_no_convergence(self): + msg = "No convergence (%d iterations, %d/%d eigenvectors converged)" + k_ok = self.iparam[4] + num_iter = self.iparam[2] + try: + ev, vec = self.extract(True) + except ArpackError as err: + msg = f"{msg} [{err}]" + ev = np.zeros((0,)) + vec = np.zeros((self.n, 0)) + k_ok = 0 + raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec) + + +class _SymmetricArpackParams(_ArpackParams): + def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None, + Minv_matvec=None, sigma=None, + ncv=None, v0=None, maxiter=None, which="LM", tol=0): + # The following modes are supported: + # mode = 1: + # Solve the standard eigenvalue problem: + # A*x = lambda*x : + # A - symmetric + # Arguments should be + # matvec = left multiplication by A + # M_matvec = None [not used] + # Minv_matvec = None [not used] + # + # mode = 2: + # Solve the general eigenvalue problem: + # A*x = lambda*M*x + # A - symmetric + # M - symmetric positive definite + # Arguments should be + # matvec = left multiplication by A + # M_matvec = left multiplication by M + # Minv_matvec = left multiplication by M^-1 + # + # mode = 3: + # Solve the general eigenvalue problem in shift-invert mode: + # A*x = lambda*M*x + # A - symmetric + # M - symmetric positive semi-definite + # Arguments should be + # matvec = None [not used] + # M_matvec = left multiplication by M + # or None, if M is the identity + # Minv_matvec = left multiplication by [A-sigma*M]^-1 + # + # mode = 4: + # Solve the general eigenvalue problem in Buckling mode: + # A*x = lambda*AG*x + # A - symmetric positive semi-definite + # AG - symmetric indefinite + # Arguments should be + # matvec = left multiplication by A + # M_matvec = None [not used] + # Minv_matvec = left multiplication by [A-sigma*AG]^-1 + # + # mode = 5: + # Solve the general eigenvalue problem in Cayley-transformed mode: + # A*x = lambda*M*x + # A - symmetric + # M - symmetric positive semi-definite + # Arguments should be + # matvec = left multiplication by A + # M_matvec = left multiplication by M + # or None, if M is the identity + # Minv_matvec = left multiplication by [A-sigma*M]^-1 + if mode == 1: + if matvec is None: + raise ValueError("matvec must be specified for mode=1") + if M_matvec is not None: + raise ValueError("M_matvec cannot be specified for mode=1") + if Minv_matvec is not None: + raise ValueError("Minv_matvec cannot be specified for mode=1") + + self.OP = matvec + self.B = lambda x: x + self.bmat = 'I' + elif mode == 2: + if matvec is None: + raise ValueError("matvec must be specified for mode=2") + if M_matvec is None: + raise ValueError("M_matvec must be specified for mode=2") + if Minv_matvec is None: + raise ValueError("Minv_matvec must be specified for mode=2") + + self.OP = lambda x: Minv_matvec(matvec(x)) + self.OPa = Minv_matvec + self.OPb = matvec + self.B = M_matvec + self.bmat = 'G' + elif mode == 3: + if 
matvec is not None: + raise ValueError("matvec must not be specified for mode=3") + if Minv_matvec is None: + raise ValueError("Minv_matvec must be specified for mode=3") + + if M_matvec is None: + self.OP = Minv_matvec + self.OPa = Minv_matvec + self.B = lambda x: x + self.bmat = 'I' + else: + self.OP = lambda x: Minv_matvec(M_matvec(x)) + self.OPa = Minv_matvec + self.B = M_matvec + self.bmat = 'G' + elif mode == 4: + if matvec is None: + raise ValueError("matvec must be specified for mode=4") + if M_matvec is not None: + raise ValueError("M_matvec must not be specified for mode=4") + if Minv_matvec is None: + raise ValueError("Minv_matvec must be specified for mode=4") + self.OPa = Minv_matvec + self.OP = lambda x: self.OPa(matvec(x)) + self.B = matvec + self.bmat = 'G' + elif mode == 5: + if matvec is None: + raise ValueError("matvec must be specified for mode=5") + if Minv_matvec is None: + raise ValueError("Minv_matvec must be specified for mode=5") + + self.OPa = Minv_matvec + self.A_matvec = matvec + + if M_matvec is None: + self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x) + self.B = lambda x: x + self.bmat = 'I' + else: + self.OP = lambda x: Minv_matvec(matvec(x) + + sigma * M_matvec(x)) + self.B = M_matvec + self.bmat = 'G' + else: + raise ValueError("mode=%i not implemented" % mode) + + if which not in _SEUPD_WHICH: + raise ValueError("which must be one of %s" + % ' '.join(_SEUPD_WHICH)) + if k >= n: + raise ValueError("k must be less than ndim(A), k=%d" % k) + + _ArpackParams.__init__(self, n, k, tp, mode, sigma, + ncv, v0, maxiter, which, tol) + + if self.ncv > n or self.ncv <= k: + raise ValueError("ncv must be k<ncv<=n, ncv=%d" % self.ncv) + + if k >= n - 1: + raise ValueError("k must be less than ndim(A)-1, k=%d" % k) + + _ArpackParams.__init__(self, n, k, tp, mode, sigma, + ncv, v0, maxiter, which, tol) + + if self.ncv > n or self.ncv <= k + 1: + raise ValueError("ncv must be k+1<ncv<=n, ncv=%d" % self.ncv) + + #this can only happen if nreturned > k, so we'll + # throw out this case. + nreturned -= 1 + i += 1 + + else: + # real matrix, mode 3 or 4, imag(sigma) is nonzero: + # see remark 3 in neupd.f + # Build complex eigenvalues from real and imaginary parts + i = 0 + while i <= k: + if abs(d[i].imag) == 0: + d[i] = np.dot(zr[:, i], self.matvec(zr[:, i])) + else: + if i < k: + z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1] + z[:, i + 1] = z[:, i].conjugate() + d[i] = ((np.dot(zr[:, i], + self.matvec(zr[:, i])) + + np.dot(zr[:, i + 1], + self.matvec(zr[:, i + 1]))) + + 1j * (np.dot(zr[:, i], + self.matvec(zr[:, i + 1])) + - np.dot(zr[:, i + 1], + self.matvec(zr[:, i])))) + d[i + 1] = d[i].conj() + i += 1 + else: + #last eigenvalue is complex: the imaginary part of + # the eigenvector has not been returned + #this can only happen if nreturned > k, so we'll + # throw out this case. + nreturned -= 1 + i += 1 + + # Now we have k+1 possible eigenvalues and eigenvectors + # Return the ones specified by the keyword "which" + + if nreturned <= k: + # we got no more eigenvalues than we wanted + d = d[:nreturned] + z = z[:, :nreturned] + else: + # we got one extra eigenvalue (likely a cc pair, but which?)
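+ # To pick which k of the k+1 candidates to keep, reproduce ARPACK's
+ # own selection rule: in modes 1 and 2 'which' is applied to the
+ # eigenvalues directly, while in shift-invert modes 3 and 4 it is
+ # applied to the shifted values w' = 1/(w - sigma) (see remark 3 in
+ # neupd.f), so the sort below ranks by exactly that quantity.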
+ if self.mode in (1, 2): + rd = d + elif self.mode in (3, 4): + rd = 1 / (d - self.sigma) + + if self.which in ['LR', 'SR']: + ind = np.argsort(rd.real) + elif self.which in ['LI', 'SI']: + # for LI,SI ARPACK returns largest,smallest + # abs(imaginary) (complex pairs come together) + ind = np.argsort(abs(rd.imag)) + else: + ind = np.argsort(abs(rd)) + + if self.which in ['LR', 'LM', 'LI']: + ind = ind[-k:][::-1] + elif self.which in ['SR', 'SM', 'SI']: + ind = ind[:k] + + d = d[ind] + z = z[:, ind] + else: + # complex is so much simpler... + d, z, ierr =\ + self._arpack_extract(return_eigenvectors, + howmny, sselect, self.sigma, workev, + self.bmat, self.which, k, self.tol, self.resid, + self.v, self.iparam, self.ipntr, + self.workd, self.workl, self.rwork, ierr) + + if ierr != 0: + raise ArpackError(ierr, infodict=self.extract_infodict) + + k_ok = self.iparam[4] + d = d[:k_ok] + z = z[:, :k_ok] + + if return_eigenvectors: + return d, z + else: + return d + + +def _aslinearoperator_with_dtype(m): + m = aslinearoperator(m) + if not hasattr(m, 'dtype'): + x = np.zeros(m.shape[1]) + m.dtype = (m * x).dtype + return m + + +class SpLuInv(LinearOperator): + """ + SpLuInv: + helper class to repeatedly solve M*x=b + using a sparse LU-decomposition of M + """ + + def __init__(self, M): + self.M_lu = splu(M) + self.shape = M.shape + self.dtype = M.dtype + self.isreal = not np.issubdtype(self.dtype, np.complexfloating) + + def _matvec(self, x): + # careful here: splu.solve will throw away imaginary + # part of x if M is real + x = np.asarray(x) + if self.isreal and np.issubdtype(x.dtype, np.complexfloating): + return (self.M_lu.solve(np.real(x).astype(self.dtype)) + + 1j * self.M_lu.solve(np.imag(x).astype(self.dtype))) + else: + return self.M_lu.solve(x.astype(self.dtype)) + + +class LuInv(LinearOperator): + """ + LuInv: + helper class to repeatedly solve M*x=b + using an LU-decomposition of M + """ + + def __init__(self, M): + self.M_lu = lu_factor(M) + self.shape = M.shape + self.dtype = M.dtype + + def _matvec(self, x): + return lu_solve(self.M_lu, x) + + +def gmres_loose(A, b, tol): + """ + gmres with looser termination condition. + """ + b = np.asarray(b) + min_tol = 1000 * np.sqrt(b.size) * np.finfo(b.dtype).eps + return gmres(A, b, rtol=max(tol, min_tol), atol=0) + + +class IterInv(LinearOperator): + """ + IterInv: + helper class to repeatedly solve M*x=b + using an iterative method. + """ + + def __init__(self, M, ifunc=gmres_loose, tol=0): + self.M = M + if hasattr(M, 'dtype'): + self.dtype = M.dtype + else: + x = np.zeros(M.shape[1]) + self.dtype = (M * x).dtype + self.shape = M.shape + + if tol <= 0: + # when tol=0, ARPACK uses machine tolerance as calculated + # by LAPACK's _LAMCH function. We should match this + tol = 2 * np.finfo(self.dtype).eps + self.ifunc = ifunc + self.tol = tol + + def _matvec(self, x): + b, info = self.ifunc(self.M, x, tol=self.tol) + if info != 0: + raise ValueError("Error in inverting M: function " + "%s did not converge (info = %i)." 
+ % (self.ifunc.__name__, info)) + return b + + +class IterOpInv(LinearOperator): + """ + IterOpInv: + helper class to repeatedly solve [A-sigma*M]*x = b + using an iterative method + """ + + def __init__(self, A, M, sigma, ifunc=gmres_loose, tol=0): + self.A = A + self.M = M + self.sigma = sigma + + def mult_func(x): + return A.matvec(x) - sigma * M.matvec(x) + + def mult_func_M_None(x): + return A.matvec(x) - sigma * x + + x = np.zeros(A.shape[1]) + if M is None: + dtype = mult_func_M_None(x).dtype + self.OP = LinearOperator(self.A.shape, + mult_func_M_None, + dtype=dtype) + else: + dtype = mult_func(x).dtype + self.OP = LinearOperator(self.A.shape, + mult_func, + dtype=dtype) + self.shape = A.shape + + if tol <= 0: + # when tol=0, ARPACK uses machine tolerance as calculated + # by LAPACK's _LAMCH function. We should match this + tol = 2 * np.finfo(self.OP.dtype).eps + self.ifunc = ifunc + self.tol = tol + + def _matvec(self, x): + b, info = self.ifunc(self.OP, x, tol=self.tol) + if info != 0: + raise ValueError("Error in inverting [A-sigma*M]: function " + "%s did not converge (info = %i)." + % (self.ifunc.__name__, info)) + return b + + @property + def dtype(self): + return self.OP.dtype + + +def _fast_spmatrix_to_csc(A, hermitian=False): + """Convert sparse matrix to CSC (by transposing, if possible)""" + if (A.format == "csr" and hermitian + and not np.issubdtype(A.dtype, np.complexfloating)): + return A.T + elif is_pydata_spmatrix(A): + # No need to convert + return A + else: + return A.tocsc() + + +def get_inv_matvec(M, hermitian=False, tol=0): + if isdense(M): + return LuInv(M).matvec + elif issparse(M) or is_pydata_spmatrix(M): + M = _fast_spmatrix_to_csc(M, hermitian=hermitian) + return SpLuInv(M).matvec + else: + return IterInv(M, tol=tol).matvec + + +def get_OPinv_matvec(A, M, sigma, hermitian=False, tol=0): + if sigma == 0: + return get_inv_matvec(A, hermitian=hermitian, tol=tol) + + if M is None: + #M is the identity matrix + if isdense(A): + if (np.issubdtype(A.dtype, np.complexfloating) + or np.imag(sigma) == 0): + A = np.copy(A) + else: + A = A + 0j + A.flat[::A.shape[1] + 1] -= sigma + return LuInv(A).matvec + elif issparse(A) or is_pydata_spmatrix(A): + A = A - sigma * eye(A.shape[0]) + A = _fast_spmatrix_to_csc(A, hermitian=hermitian) + return SpLuInv(A).matvec + else: + return IterOpInv(_aslinearoperator_with_dtype(A), + M, sigma, tol=tol).matvec + else: + if ((not isdense(A) and not issparse(A) and not is_pydata_spmatrix(A)) or + (not isdense(M) and not issparse(M) and not is_pydata_spmatrix(M))): + return IterOpInv(_aslinearoperator_with_dtype(A), + _aslinearoperator_with_dtype(M), + sigma, tol=tol).matvec + elif isdense(A) or isdense(M): + return LuInv(A - sigma * M).matvec + else: + OP = A - sigma * M + OP = _fast_spmatrix_to_csc(OP, hermitian=hermitian) + return SpLuInv(OP).matvec + + +# ARPACK is not threadsafe or reentrant (SAVE variables), so we need a +# lock and a re-entering check. +_ARPACK_LOCK = ReentrancyLock("Nested calls to eigs/eigsh not allowed: " + "ARPACK is not re-entrant") + + +def eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, + ncv=None, maxiter=None, tol=0, return_eigenvectors=True, + Minv=None, OPinv=None, OPpart=None): + """ + Find k eigenvalues and eigenvectors of the square matrix A. + + Solves ``A @ x[i] = w[i] * x[i]``, the standard eigenvalue problem + for w[i] eigenvalues with corresponding eigenvectors x[i].
+ + If M is specified, solves ``A @ x[i] = w[i] * M @ x[i]``, the + generalized eigenvalue problem for w[i] eigenvalues + with corresponding eigenvectors x[i] + + Parameters + ---------- + A : ndarray, sparse matrix or LinearOperator + An array, sparse matrix, or LinearOperator representing + the operation ``A @ x``, where A is a real or complex square matrix. + k : int, optional + The number of eigenvalues and eigenvectors desired. + `k` must be smaller than N-1. It is not possible to compute all + eigenvectors of a matrix. + M : ndarray, sparse matrix or LinearOperator, optional + An array, sparse matrix, or LinearOperator representing + the operation M@x for the generalized eigenvalue problem + + A @ x = w * M @ x. + + M must represent a real symmetric matrix if A is real, and must + represent a complex Hermitian matrix if A is complex. For best + results, the data type of M should be the same as that of A. + Additionally: + + If `sigma` is None, M is positive definite + + If sigma is specified, M is positive semi-definite + + If sigma is None, eigs requires an operator to compute the solution + of the linear equation ``M @ x = b``. This is done internally via a + (sparse) LU decomposition for an explicit matrix M, or via an + iterative solver for a general linear operator. Alternatively, + the user can supply the matrix or operator Minv, which gives + ``x = Minv @ b = M^-1 @ b``. + sigma : real or complex, optional + Find eigenvalues near sigma using shift-invert mode. This requires + an operator to compute the solution of the linear system + ``[A - sigma * M] @ x = b``, where M is the identity matrix if + unspecified. This is computed internally via a (sparse) LU + decomposition for explicit matrices A & M, or via an iterative + solver if either A or M is a general linear operator. + Alternatively, the user can supply the matrix or operator OPinv, + which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``. + For a real matrix A, shift-invert can either be done in imaginary + mode or real mode, specified by the parameter OPpart ('r' or 'i'). + Note that when sigma is specified, the keyword 'which' (below) + refers to the shifted eigenvalues ``w'[i]`` where: + + If A is real and OPpart == 'r' (default), + ``w'[i] = 1/2 * [1/(w[i]-sigma) + 1/(w[i]-conj(sigma))]``. + + If A is real and OPpart == 'i', + ``w'[i] = 1/2i * [1/(w[i]-sigma) - 1/(w[i]-conj(sigma))]``. + + If A is complex, ``w'[i] = 1/(w[i]-sigma)``. + + v0 : ndarray, optional + Starting vector for iteration. + Default: random + ncv : int, optional + The number of Lanczos vectors generated + `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``. + Default: ``min(n, max(2*k + 1, 20))`` + which : str, ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'], optional + Which `k` eigenvectors and eigenvalues to find: + + 'LM' : largest magnitude + + 'SM' : smallest magnitude + + 'LR' : largest real part + + 'SR' : smallest real part + + 'LI' : largest imaginary part + + 'SI' : smallest imaginary part + + When sigma != None, 'which' refers to the shifted eigenvalues w'[i] + (see discussion in 'sigma', above). ARPACK is generally better + at finding large values than small values. If small eigenvalues are + desired, consider using shift-invert mode for better performance. + maxiter : int, optional + Maximum number of Arnoldi update iterations allowed + Default: ``n*10`` + tol : float, optional + Relative accuracy for eigenvalues (stopping criterion) + The default value of 0 implies machine precision. 
+ return_eigenvectors : bool, optional + Return eigenvectors (True) in addition to eigenvalues + Minv : ndarray, sparse matrix or LinearOperator, optional + See notes in M, above. + OPinv : ndarray, sparse matrix or LinearOperator, optional + See notes in sigma, above. + OPpart : {'r' or 'i'}, optional + See notes in sigma, above + + Returns + ------- + w : ndarray + Array of k eigenvalues. + v : ndarray + An array of `k` eigenvectors. + ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i]. + + Raises + ------ + ArpackNoConvergence + When the requested convergence is not obtained. + The currently converged eigenvalues and eigenvectors can be found + as ``eigenvalues`` and ``eigenvectors`` attributes of the exception + object. + + See Also + -------- + eigsh : eigenvalues and eigenvectors for symmetric matrix A + svds : singular value decomposition for a matrix A + + Notes + ----- + This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD, + ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to + find the eigenvalues and eigenvectors [2]_. + + References + ---------- + .. [1] ARPACK Software, https://github.com/opencollab/arpack-ng + .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: + Solution of Large Scale Eigenvalue Problems by Implicitly Restarted + Arnoldi Methods. SIAM, Philadelphia, PA, 1998. + + Examples + -------- + Find 6 eigenvectors of the identity matrix: + + >>> import numpy as np + >>> from scipy.sparse.linalg import eigs + >>> id = np.eye(13) + >>> vals, vecs = eigs(id, k=6) + >>> vals + array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j]) + >>> vecs.shape + (13, 6) + + """ + if A.shape[0] != A.shape[1]: + raise ValueError(f'expected square matrix (shape={A.shape})') + if M is not None: + if M.shape != A.shape: + raise ValueError(f'wrong M dimensions {M.shape}, should be {A.shape}') + if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower(): + warnings.warn('M does not have the same type precision as A. ' + 'This may adversely affect ARPACK convergence', + stacklevel=2) + + n = A.shape[0] + + if k <= 0: + raise ValueError("k=%d must be greater than 0." % k) + + if k >= n - 1: + warnings.warn("k >= N - 1 for N * N square matrix. " + "Attempting to use scipy.linalg.eig instead.", + RuntimeWarning, stacklevel=2) + + if issparse(A): + raise TypeError("Cannot use scipy.linalg.eig for sparse A with " + "k >= N - 1. 
Use scipy.linalg.eig(A.toarray()) or" + " reduce k.") + if isinstance(A, LinearOperator): + raise TypeError("Cannot use scipy.linalg.eig for LinearOperator " + "A with k >= N - 1.") + if isinstance(M, LinearOperator): + raise TypeError("Cannot use scipy.linalg.eig for LinearOperator " + "M with k >= N - 1.") + + return eig(A, b=M, right=return_eigenvectors) + + if sigma is None: + matvec = _aslinearoperator_with_dtype(A).matvec + + if OPinv is not None: + raise ValueError("OPinv should not be specified " + "with sigma = None.") + if OPpart is not None: + raise ValueError("OPpart should not be specified with " + "sigma = None or complex A") + + if M is None: + #standard eigenvalue problem + mode = 1 + M_matvec = None + Minv_matvec = None + if Minv is not None: + raise ValueError("Minv should not be " + "specified with M = None.") + else: + #general eigenvalue problem + mode = 2 + if Minv is None: + Minv_matvec = get_inv_matvec(M, hermitian=True, tol=tol) + else: + Minv = _aslinearoperator_with_dtype(Minv) + Minv_matvec = Minv.matvec + M_matvec = _aslinearoperator_with_dtype(M).matvec + else: + #sigma is not None: shift-invert mode + if np.issubdtype(A.dtype, np.complexfloating): + if OPpart is not None: + raise ValueError("OPpart should not be specified " + "with sigma=None or complex A") + mode = 3 + elif OPpart is None or OPpart.lower() == 'r': + mode = 3 + elif OPpart.lower() == 'i': + if np.imag(sigma) == 0: + raise ValueError("OPpart cannot be 'i' if sigma is real") + mode = 4 + else: + raise ValueError("OPpart must be one of ('r','i')") + + matvec = _aslinearoperator_with_dtype(A).matvec + if Minv is not None: + raise ValueError("Minv should not be specified when sigma is") + if OPinv is None: + Minv_matvec = get_OPinv_matvec(A, M, sigma, + hermitian=False, tol=tol) + else: + OPinv = _aslinearoperator_with_dtype(OPinv) + Minv_matvec = OPinv.matvec + if M is None: + M_matvec = None + else: + M_matvec = _aslinearoperator_with_dtype(M).matvec + + params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode, + M_matvec, Minv_matvec, sigma, + ncv, v0, maxiter, which, tol) + + with _ARPACK_LOCK: + while not params.converged: + params.iterate() + + return params.extract(return_eigenvectors) + + +def eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, + ncv=None, maxiter=None, tol=0, return_eigenvectors=True, + Minv=None, OPinv=None, mode='normal'): + """ + Find k eigenvalues and eigenvectors of the real symmetric square matrix + or complex Hermitian matrix A. + + Solves ``A @ x[i] = w[i] * x[i]``, the standard eigenvalue problem for + w[i] eigenvalues with corresponding eigenvectors x[i]. + + If M is specified, solves ``A @ x[i] = w[i] * M @ x[i]``, the + generalized eigenvalue problem for w[i] eigenvalues + with corresponding eigenvectors x[i]. + + Note that there is no specialized routine for the case when A is a complex + Hermitian matrix. In this case, ``eigsh()`` will call ``eigs()`` and return the + real parts of the eigenvalues thus obtained. + + Parameters + ---------- + A : ndarray, sparse matrix or LinearOperator + A square operator representing the operation ``A @ x``, where ``A`` is + real symmetric or complex Hermitian. For buckling mode (see below) + ``A`` must additionally be positive-definite. + k : int, optional + The number of eigenvalues and eigenvectors desired. + `k` must be smaller than N. It is not possible to compute all + eigenvectors of a matrix. + + Returns + ------- + w : array + Array of k eigenvalues. 
+ v : array + An array representing the `k` eigenvectors. The column ``v[:, i]`` is + the eigenvector corresponding to the eigenvalue ``w[i]``. + + Other Parameters + ---------------- + M : An N x N matrix, array, sparse matrix, or linear operator representing + the operation ``M @ x`` for the generalized eigenvalue problem + + A @ x = w * M @ x. + + M must represent a real symmetric matrix if A is real, and must + represent a complex Hermitian matrix if A is complex. For best + results, the data type of M should be the same as that of A. + Additionally: + + If sigma is None, M is symmetric positive definite. + + If sigma is specified, M is symmetric positive semi-definite. + + In buckling mode, M is symmetric indefinite. + + If sigma is None, eigsh requires an operator to compute the solution + of the linear equation ``M @ x = b``. This is done internally via a + (sparse) LU decomposition for an explicit matrix M, or via an + iterative solver for a general linear operator. Alternatively, + the user can supply the matrix or operator Minv, which gives + ``x = Minv @ b = M^-1 @ b``. + sigma : real + Find eigenvalues near sigma using shift-invert mode. This requires + an operator to compute the solution of the linear system + ``[A - sigma * M] x = b``, where M is the identity matrix if + unspecified. This is computed internally via a (sparse) LU + decomposition for explicit matrices A & M, or via an iterative + solver if either A or M is a general linear operator. + Alternatively, the user can supply the matrix or operator OPinv, + which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``. + Note that when sigma is specified, the keyword 'which' refers to + the shifted eigenvalues ``w'[i]`` where: + + if mode == 'normal', ``w'[i] = 1 / (w[i] - sigma)``. + + if mode == 'cayley', ``w'[i] = (w[i] + sigma) / (w[i] - sigma)``. + + if mode == 'buckling', ``w'[i] = w[i] / (w[i] - sigma)``. + + (see further discussion in 'mode' below) + v0 : ndarray, optional + Starting vector for iteration. + Default: random + ncv : int, optional + The number of Lanczos vectors generated ncv must be greater than k and + smaller than n; it is recommended that ``ncv > 2*k``. + Default: ``min(n, max(2*k + 1, 20))`` + which : str ['LM' | 'SM' | 'LA' | 'SA' | 'BE'] + If A is a complex Hermitian matrix, 'BE' is invalid. + Which `k` eigenvectors and eigenvalues to find: + + 'LM' : Largest (in magnitude) eigenvalues. + + 'SM' : Smallest (in magnitude) eigenvalues. + + 'LA' : Largest (algebraic) eigenvalues. + + 'SA' : Smallest (algebraic) eigenvalues. + + 'BE' : Half (k/2) from each end of the spectrum. + + When k is odd, return one more (k/2+1) from the high end. + When sigma != None, 'which' refers to the shifted eigenvalues ``w'[i]`` + (see discussion in 'sigma', above). ARPACK is generally better + at finding large values than small values. If small eigenvalues are + desired, consider using shift-invert mode for better performance. + maxiter : int, optional + Maximum number of Arnoldi update iterations allowed. + Default: ``n*10`` + tol : float + Relative accuracy for eigenvalues (stopping criterion). + The default value of 0 implies machine precision. + Minv : N x N matrix, array, sparse matrix, or LinearOperator + See notes in M, above. + OPinv : N x N matrix, array, sparse matrix, or LinearOperator + See notes in sigma, above. + return_eigenvectors : bool + Return eigenvectors (True) in addition to eigenvalues. + This value determines the order in which eigenvalues are sorted. 
+ The sort order is also dependent on the `which` variable. + + For which = 'LM' or 'SA': + If `return_eigenvectors` is True, eigenvalues are sorted by + algebraic value. + + If `return_eigenvectors` is False, eigenvalues are sorted by + absolute value. + + For which = 'BE' or 'LA': + eigenvalues are always sorted by algebraic value. + + For which = 'SM': + If `return_eigenvectors` is True, eigenvalues are sorted by + algebraic value. + + If `return_eigenvectors` is False, eigenvalues are sorted by + decreasing absolute value. + + mode : string ['normal' | 'buckling' | 'cayley'] + Specify strategy to use for shift-invert mode. This argument applies + only for real-valued A and sigma != None. For shift-invert mode, + ARPACK internally solves the eigenvalue problem + ``OP @ x'[i] = w'[i] * B @ x'[i]`` + and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i] + into the desired eigenvectors and eigenvalues of the problem + ``A @ x[i] = w[i] * M @ x[i]``. + The modes are as follows: + + 'normal' : + OP = [A - sigma * M]^-1 @ M, + B = M, + w'[i] = 1 / (w[i] - sigma) + + 'buckling' : + OP = [A - sigma * M]^-1 @ A, + B = A, + w'[i] = w[i] / (w[i] - sigma) + + 'cayley' : + OP = [A - sigma * M]^-1 @ [A + sigma * M], + B = M, + w'[i] = (w[i] + sigma) / (w[i] - sigma) + + The choice of mode will affect which eigenvalues are selected by + the keyword 'which', and can also impact the stability of + convergence (see [2] for a discussion). + + Raises + ------ + ArpackNoConvergence + When the requested convergence is not obtained. + + The currently converged eigenvalues and eigenvectors can be found + as ``eigenvalues`` and ``eigenvectors`` attributes of the exception + object. + + See Also + -------- + eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A + svds : singular value decomposition for a matrix A + + Notes + ----- + This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD + functions which use the Implicitly Restarted Lanczos Method to + find the eigenvalues and eigenvectors [2]_. + + References + ---------- + .. [1] ARPACK Software, https://github.com/opencollab/arpack-ng + .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: + Solution of Large Scale Eigenvalue Problems by Implicitly Restarted + Arnoldi Methods. SIAM, Philadelphia, PA, 1998. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse.linalg import eigsh + >>> identity = np.eye(13) + >>> eigenvalues, eigenvectors = eigsh(identity, k=6) + >>> eigenvalues + array([1., 1., 1., 1., 1., 1.]) + >>> eigenvectors.shape + (13, 6) + + """ + # complex Hermitian matrices should be solved with eigs + if np.issubdtype(A.dtype, np.complexfloating): + if mode != 'normal': + raise ValueError("mode=%s cannot be used with " + "complex matrix A" % mode) + if which == 'BE': + raise ValueError("which='BE' cannot be used with complex matrix A") + elif which == 'LA': + which = 'LR' + elif which == 'SA': + which = 'SR' + ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0, + ncv=ncv, maxiter=maxiter, tol=tol, + return_eigenvectors=return_eigenvectors, Minv=Minv, + OPinv=OPinv) + + if return_eigenvectors: + return ret[0].real, ret[1] + else: + return ret.real + + if A.shape[0] != A.shape[1]: + raise ValueError(f'expected square matrix (shape={A.shape})') + if M is not None: + if M.shape != A.shape: + raise ValueError(f'wrong M dimensions {M.shape}, should be {A.shape}') + if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower(): + warnings.warn('M does not have the same type precision as A. ' + 'This may adversely affect ARPACK convergence', + stacklevel=2) + + n = A.shape[0] + + if k <= 0: + raise ValueError("k must be greater than 0.") + + if k >= n: + warnings.warn("k >= N for N * N square matrix. " + "Attempting to use scipy.linalg.eigh instead.", + RuntimeWarning, stacklevel=2) + + if issparse(A): + raise TypeError("Cannot use scipy.linalg.eigh for sparse A with " + "k >= N. Use scipy.linalg.eigh(A.toarray()) or" + " reduce k.") + if isinstance(A, LinearOperator): + raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator " + "A with k >= N.") + if isinstance(M, LinearOperator): + raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator " + "M with k >= N.") + + return eigh(A, b=M, eigvals_only=not return_eigenvectors) + + if sigma is None: + A = _aslinearoperator_with_dtype(A) + matvec = A.matvec + + if OPinv is not None: + raise ValueError("OPinv should not be specified " + "with sigma = None.") + if M is None: + #standard eigenvalue problem + mode = 1 + M_matvec = None + Minv_matvec = None + if Minv is not None: + raise ValueError("Minv should not be " + "specified with M = None.") + else: + #general eigenvalue problem + mode = 2 + if Minv is None: + Minv_matvec = get_inv_matvec(M, hermitian=True, tol=tol) + else: + Minv = _aslinearoperator_with_dtype(Minv) + Minv_matvec = Minv.matvec + M_matvec = _aslinearoperator_with_dtype(M).matvec + else: + # sigma is not None: shift-invert mode + if Minv is not None: + raise ValueError("Minv should not be specified when sigma is") + + # normal mode + if mode == 'normal': + mode = 3 + matvec = None + if OPinv is None: + Minv_matvec = get_OPinv_matvec(A, M, sigma, + hermitian=True, tol=tol) + else: + OPinv = _aslinearoperator_with_dtype(OPinv) + Minv_matvec = OPinv.matvec + if M is None: + M_matvec = None + else: + M = _aslinearoperator_with_dtype(M) + M_matvec = M.matvec + + # buckling mode + elif mode == 'buckling': + mode = 4 + if OPinv is None: + Minv_matvec = get_OPinv_matvec(A, M, sigma, + hermitian=True, tol=tol) + else: + Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec + matvec = _aslinearoperator_with_dtype(A).matvec + M_matvec = None + + # cayley-transform mode + elif mode == 'cayley': + mode = 5 + matvec = _aslinearoperator_with_dtype(A).matvec + if OPinv is None: + Minv_matvec = 
get_OPinv_matvec(A, M, sigma, + hermitian=True, tol=tol) + else: + Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec + if M is None: + M_matvec = None + else: + M_matvec = _aslinearoperator_with_dtype(M).matvec + + # unrecognized mode + else: + raise ValueError("unrecognized mode '%s'" % mode) + + params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode, + M_matvec, Minv_matvec, sigma, + ncv, v0, maxiter, which, tol) + + with _ARPACK_LOCK: + while not params.converged: + params.iterate() + + return params.extract(return_eigenvectors) diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__init__.py b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05e4458071f838d875316547ec02ea2507d5176f Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/test_arpack.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/test_arpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab6d330c507f14241f0ed5b458015b5baa20d7f8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/test_arpack.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py new file mode 100644 index 0000000000000000000000000000000000000000..1cf73f28e21d4f29d067ae84bf63e5291acbe810 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py @@ -0,0 +1,718 @@ +__usage__ = """ +To run tests locally: + python tests/test_arpack.py [-l] [-v] + +""" + +import threading +import itertools + +import numpy as np + +from numpy.testing import assert_allclose, assert_equal, suppress_warnings +from pytest import raises as assert_raises +import pytest + +from numpy import dot, conj, random +from scipy.linalg import eig, eigh +from scipy.sparse import csc_matrix, csr_matrix, diags, rand +from scipy.sparse.linalg import LinearOperator, aslinearoperator +from scipy.sparse.linalg._eigen.arpack import (eigs, eigsh, arpack, + ArpackNoConvergence) + + +from scipy._lib._gcutils import assert_deallocated, IS_PYPY + + +# precision for tests +_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11} + + +def _get_test_tolerance(type_char, mattype=None, D_type=None, which=None): + """ + Return tolerance values suitable for a given test: + + Parameters + ---------- + type_char : {'f', 'd', 'F', 'D'} + Data type in ARPACK eigenvalue problem + mattype : {csr_matrix, aslinearoperator, asarray}, optional + Linear operator type + + Returns + ------- + tol + Tolerance to pass to the ARPACK routine + rtol + Relative tolerance for outputs + atol + Absolute tolerance for outputs + + """ + + rtol = {'f': 3000 * np.finfo(np.float32).eps, + 'F': 3000 * 
np.finfo(np.float32).eps, + 'd': 2000 * np.finfo(np.float64).eps, + 'D': 2000 * np.finfo(np.float64).eps}[type_char] + atol = rtol + tol = 0 + + if mattype is aslinearoperator and type_char in ('f', 'F'): + # iterative methods in single precision: worse errors + # also: bump ARPACK tolerance so that the iterative method converges + tol = 30 * np.finfo(np.float32).eps + rtol *= 5 + + if mattype is csr_matrix and type_char in ('f', 'F'): + # sparse in single precision: worse errors + rtol *= 5 + + if ( + which in ('LM', 'SM', 'LA') + and D_type.name == "gen-hermitian-Mc" + ): + if type_char == 'F': + # missing case 1, 2, and more, from PR 14798 + rtol *= 5 + + if type_char == 'D': + # missing more cases, from PR 14798 + rtol *= 10 + atol *= 10 + + return tol, rtol, atol + + +def generate_matrix(N, complex_=False, hermitian=False, + pos_definite=False, sparse=False): + M = np.random.random((N, N)) + if complex_: + M = M + 1j * np.random.random((N, N)) + + if hermitian: + if pos_definite: + if sparse: + i = np.arange(N) + j = np.random.randint(N, size=N-2) + i, j = np.meshgrid(i, j) + M[i, j] = 0 + M = np.dot(M.conj(), M.T) + else: + M = np.dot(M.conj(), M.T) + if sparse: + i = np.random.randint(N, size=N * N // 4) + j = np.random.randint(N, size=N * N // 4) + ind = np.nonzero(i == j) + j[ind] = (j[ind] + 1) % N + M[i, j] = 0 + M[j, i] = 0 + else: + if sparse: + i = np.random.randint(N, size=N * N // 2) + j = np.random.randint(N, size=N * N // 2) + M[i, j] = 0 + return M + + +def generate_matrix_symmetric(N, pos_definite=False, sparse=False): + M = np.random.random((N, N)) + + M = 0.5 * (M + M.T) # Make M symmetric + + if pos_definite: + Id = N * np.eye(N) + if sparse: + M = csr_matrix(M) + M += Id + else: + if sparse: + M = csr_matrix(M) + + return M + + +def assert_allclose_cc(actual, desired, **kw): + """Almost equal or complex conjugates almost equal""" + try: + assert_allclose(actual, desired, **kw) + except AssertionError: + assert_allclose(actual, conj(desired), **kw) + + +def argsort_which(eigenvalues, typ, k, which, + sigma=None, OPpart=None, mode=None): + """Return sorted indices of eigenvalues using the "which" keyword + from eigs and eigsh""" + if sigma is None: + reval = np.round(eigenvalues, decimals=_ndigits[typ]) + else: + if mode is None or mode == 'normal': + if OPpart is None: + reval = 1. / (eigenvalues - sigma) + elif OPpart == 'r': + reval = 0.5 * (1. / (eigenvalues - sigma) + + 1. / (eigenvalues - np.conj(sigma))) + elif OPpart == 'i': + reval = -0.5j * (1. / (eigenvalues - sigma) + - 1. / (eigenvalues - np.conj(sigma))) + elif mode == 'cayley': + reval = (eigenvalues + sigma) / (eigenvalues - sigma) + elif mode == 'buckling': + reval = eigenvalues / (eigenvalues - sigma) + else: + raise ValueError("mode='%s' not recognized" % mode) + + reval = np.round(reval, decimals=_ndigits[typ]) + + if which in ['LM', 'SM']: + ind = np.argsort(abs(reval)) + elif which in ['LR', 'SR', 'LA', 'SA', 'BE']: + ind = np.argsort(np.real(reval)) + elif which in ['LI', 'SI']: + # for LI,SI ARPACK returns largest,smallest abs(imaginary) why? 
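+ # (lowercase typ means a real-valued problem: ARPACK returns complex
+ # conjugate pairs together, so only abs(imag) gives a well-defined
+ # order; uppercase typ means a complex problem, where the signed
+ # imaginary part is used.)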
+ if typ.islower(): + ind = np.argsort(abs(np.imag(reval))) + else: + ind = np.argsort(np.imag(reval)) + else: + raise ValueError("which='%s' is unrecognized" % which) + + if which in ['LM', 'LA', 'LR', 'LI']: + return ind[-k:] + elif which in ['SM', 'SA', 'SR', 'SI']: + return ind[:k] + elif which == 'BE': + return np.concatenate((ind[:k//2], ind[k//2-k:])) + + +def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None, + mattype=np.asarray, OPpart=None, mode='normal'): + general = ('bmat' in d) + + if symmetric: + eigs_func = eigsh + else: + eigs_func = eigs + + if general: + err = ("error for {}:general, typ={}, which={}, sigma={}, " + "mattype={}, OPpart={}, mode={}".format(eigs_func.__name__, + typ, which, sigma, + mattype.__name__, + OPpart, mode)) + else: + err = ("error for {}:standard, typ={}, which={}, sigma={}, " + "mattype={}, OPpart={}, mode={}".format(eigs_func.__name__, + typ, which, sigma, + mattype.__name__, + OPpart, mode)) + + a = d['mat'].astype(typ) + ac = mattype(a) + + if general: + b = d['bmat'].astype(typ) + bc = mattype(b) + + # get exact eigenvalues + exact_eval = d['eval'].astype(typ.upper()) + ind = argsort_which(exact_eval, typ, k, which, + sigma, OPpart, mode) + exact_eval = exact_eval[ind] + + # compute arpack eigenvalues + kwargs = dict(which=which, v0=v0, sigma=sigma) + if eigs_func is eigsh: + kwargs['mode'] = mode + else: + kwargs['OPpart'] = OPpart + + # compute suitable tolerances + kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype, d, which) + # on rare occasions, ARPACK routines return results that are proper + # eigenvalues and -vectors, but not necessarily the ones requested in + # the parameter which. This is inherent to the Krylov methods, and + # should not be treated as a failure. If such a rare situation + # occurs, the calculation is tried again (but at most a few times). 
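+ # (The retry budget below is fixed at five attempts; if the computed
+ # eigenvalues still disagree with the requested ones after that, the
+ # final assert_allclose_cc after the loop reports the failure.)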
+ ntries = 0 + while ntries < 5: + # solve + if general: + try: + eigenvalues, evec = eigs_func(ac, k, bc, **kwargs) + except ArpackNoConvergence: + kwargs['maxiter'] = 20*a.shape[0] + eigenvalues, evec = eigs_func(ac, k, bc, **kwargs) + else: + try: + eigenvalues, evec = eigs_func(ac, k, **kwargs) + except ArpackNoConvergence: + kwargs['maxiter'] = 20*a.shape[0] + eigenvalues, evec = eigs_func(ac, k, **kwargs) + + ind = argsort_which(eigenvalues, typ, k, which, + sigma, OPpart, mode) + eigenvalues = eigenvalues[ind] + evec = evec[:, ind] + + try: + # check eigenvalues + assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol, + err_msg=err) + check_evecs = True + except AssertionError: + check_evecs = False + ntries += 1 + + if check_evecs: + # check eigenvectors + LHS = np.dot(a, evec) + if general: + RHS = eigenvalues * np.dot(b, evec) + else: + RHS = eigenvalues * evec + + assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err) + break + + # check eigenvalues + assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol, err_msg=err) + + +class DictWithRepr(dict): + def __init__(self, name): + self.name = name + + def __repr__(self): + return "<%s>" % self.name + + +class SymmetricParams: + def __init__(self): + self.eigs = eigsh + self.which = ['LM', 'SM', 'LA', 'SA', 'BE'] + self.mattypes = [csr_matrix, aslinearoperator, np.asarray] + self.sigmas_modes = {None: ['normal'], + 0.5: ['normal', 'buckling', 'cayley']} + + # generate matrices + # these should all be float32 so that the eigenvalues + # are the same in float32 and float64 + N = 6 + np.random.seed(2300) + Ar = generate_matrix(N, hermitian=True, + pos_definite=True).astype('f').astype('d') + M = generate_matrix(N, hermitian=True, + pos_definite=True).astype('f').astype('d') + Ac = generate_matrix(N, hermitian=True, pos_definite=True, + complex_=True).astype('F').astype('D') + Mc = generate_matrix(N, hermitian=True, pos_definite=True, + complex_=True).astype('F').astype('D') + v0 = np.random.random(N) + + # standard symmetric problem + SS = DictWithRepr("std-symmetric") + SS['mat'] = Ar + SS['v0'] = v0 + SS['eval'] = eigh(SS['mat'], eigvals_only=True) + + # general symmetric problem + GS = DictWithRepr("gen-symmetric") + GS['mat'] = Ar + GS['bmat'] = M + GS['v0'] = v0 + GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True) + + # standard hermitian problem + SH = DictWithRepr("std-hermitian") + SH['mat'] = Ac + SH['v0'] = v0 + SH['eval'] = eigh(SH['mat'], eigvals_only=True) + + # general hermitian problem + GH = DictWithRepr("gen-hermitian") + GH['mat'] = Ac + GH['bmat'] = M + GH['v0'] = v0 + GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True) + + # general hermitian problem with hermitian M + GHc = DictWithRepr("gen-hermitian-Mc") + GHc['mat'] = Ac + GHc['bmat'] = Mc + GHc['v0'] = v0 + GHc['eval'] = eigh(GHc['mat'], GHc['bmat'], eigvals_only=True) + + self.real_test_cases = [SS, GS] + self.complex_test_cases = [SH, GH, GHc] + + +class NonSymmetricParams: + def __init__(self): + self.eigs = eigs + self.which = ['LM', 'LR', 'LI'] # , 'SM', 'LR', 'SR', 'LI', 'SI'] + self.mattypes = [csr_matrix, aslinearoperator, np.asarray] + self.sigmas_OPparts = {None: [None], + 0.1: ['r'], + 0.1 + 0.1j: ['r', 'i']} + + # generate matrices + # these should all be float32 so that the eigenvalues + # are the same in float32 and float64 + N = 6 + np.random.seed(2300) + Ar = generate_matrix(N).astype('f').astype('d') + M = generate_matrix(N, hermitian=True, + pos_definite=True).astype('f').astype('d') + Ac = 
generate_matrix(N, complex_=True).astype('F').astype('D') + v0 = np.random.random(N) + + # standard real nonsymmetric problem + SNR = DictWithRepr("std-real-nonsym") + SNR['mat'] = Ar + SNR['v0'] = v0 + SNR['eval'] = eig(SNR['mat'], left=False, right=False) + + # general real nonsymmetric problem + GNR = DictWithRepr("gen-real-nonsym") + GNR['mat'] = Ar + GNR['bmat'] = M + GNR['v0'] = v0 + GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False) + + # standard complex nonsymmetric problem + SNC = DictWithRepr("std-cmplx-nonsym") + SNC['mat'] = Ac + SNC['v0'] = v0 + SNC['eval'] = eig(SNC['mat'], left=False, right=False) + + # general complex nonsymmetric problem + GNC = DictWithRepr("gen-cmplx-nonsym") + GNC['mat'] = Ac + GNC['bmat'] = M + GNC['v0'] = v0 + GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False) + + self.real_test_cases = [SNR, GNR] + self.complex_test_cases = [SNC, GNC] + + +def test_symmetric_modes(): + params = SymmetricParams() + k = 2 + symmetric = True + for D in params.real_test_cases: + for typ in 'fd': + for which in params.which: + for mattype in params.mattypes: + for (sigma, modes) in params.sigmas_modes.items(): + for mode in modes: + eval_evec(symmetric, D, typ, k, which, + None, sigma, mattype, None, mode) + + +def test_hermitian_modes(): + params = SymmetricParams() + k = 2 + symmetric = True + for D in params.complex_test_cases: + for typ in 'FD': + for which in params.which: + if which == 'BE': + continue # BE invalid for complex + for mattype in params.mattypes: + for sigma in params.sigmas_modes: + eval_evec(symmetric, D, typ, k, which, + None, sigma, mattype) + + +def test_symmetric_starting_vector(): + params = SymmetricParams() + symmetric = True + for k in [1, 2, 3, 4, 5]: + for D in params.real_test_cases: + for typ in 'fd': + v0 = random.rand(len(D['v0'])).astype(typ) + eval_evec(symmetric, D, typ, k, 'LM', v0) + + +def test_symmetric_no_convergence(): + np.random.seed(1234) + m = generate_matrix(30, hermitian=True, pos_definite=True) + tol, rtol, atol = _get_test_tolerance('d') + try: + w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol, ncv=9) + raise AssertionError("Spurious no-error exit") + except ArpackNoConvergence as err: + k = len(err.eigenvalues) + if k <= 0: + raise AssertionError("Spurious no-eigenvalues-found case") from err + w, v = err.eigenvalues, err.eigenvectors + assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol) + + +def test_real_nonsymmetric_modes(): + params = NonSymmetricParams() + k = 2 + symmetric = False + for D in params.real_test_cases: + for typ in 'fd': + for which in params.which: + for mattype in params.mattypes: + for sigma, OPparts in params.sigmas_OPparts.items(): + for OPpart in OPparts: + eval_evec(symmetric, D, typ, k, which, + None, sigma, mattype, OPpart) + + +def test_complex_nonsymmetric_modes(): + params = NonSymmetricParams() + k = 2 + symmetric = False + for D in params.complex_test_cases: + for typ in 'DF': + for which in params.which: + for mattype in params.mattypes: + for sigma in params.sigmas_OPparts: + eval_evec(symmetric, D, typ, k, which, + None, sigma, mattype) + + +def test_standard_nonsymmetric_starting_vector(): + params = NonSymmetricParams() + sigma = None + symmetric = False + for k in [1, 2, 3, 4]: + for d in params.complex_test_cases: + for typ in 'FD': + A = d['mat'] + n = A.shape[0] + v0 = random.rand(n).astype(typ) + eval_evec(symmetric, d, typ, k, "LM", v0, sigma) + + +def test_general_nonsymmetric_starting_vector(): + params = 
NonSymmetricParams() + sigma = None + symmetric = False + for k in [1, 2, 3, 4]: + for d in params.complex_test_cases: + for typ in 'FD': + A = d['mat'] + n = A.shape[0] + v0 = random.rand(n).astype(typ) + eval_evec(symmetric, d, typ, k, "LM", v0, sigma) + + +def test_standard_nonsymmetric_no_convergence(): + np.random.seed(1234) + m = generate_matrix(30, complex_=True) + tol, rtol, atol = _get_test_tolerance('d') + try: + w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol) + raise AssertionError("Spurious no-error exit") + except ArpackNoConvergence as err: + k = len(err.eigenvalues) + if k <= 0: + raise AssertionError("Spurious no-eigenvalues-found case") from err + w, v = err.eigenvalues, err.eigenvectors + for ww, vv in zip(w, v.T): + assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol) + + +def test_eigen_bad_shapes(): + # A is not square. + A = csc_matrix(np.zeros((2, 3))) + assert_raises(ValueError, eigs, A) + + +def test_eigen_bad_kwargs(): + # Test eigen on wrong keyword argument + A = csc_matrix(np.zeros((8, 8))) + assert_raises(ValueError, eigs, A, which='XX') + + +def test_ticket_1459_arpack_crash(): + for dtype in [np.float32, np.float64]: + # This test does not seem to catch the issue for float32, + # but we made the same fix there, just to be sure + + N = 6 + k = 2 + + np.random.seed(2301) + A = np.random.random((N, N)).astype(dtype) + v0 = np.array([-0.71063568258907849895, -0.83185111795729227424, + -0.34365925382227402451, 0.46122533684552280420, + -0.58001341115969040629, -0.78844877570084292984e-01], + dtype=dtype) + + # Should not crash: + evals, evecs = eigs(A, k, v0=v0) + + +@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") +def test_linearoperator_deallocation(): + # Check that the linear operators used by the Arpack wrappers are + # deallocatable by reference counting -- they are big objects, so + # Python's cyclic GC may not collect them fast enough before + # running out of memory if eigs/eigsh are called in a tight loop. + + M_d = np.eye(10) + M_s = csc_matrix(M_d) + M_o = aslinearoperator(M_d) + + with assert_deallocated(lambda: arpack.SpLuInv(M_s)): + pass + with assert_deallocated(lambda: arpack.LuInv(M_d)): + pass + with assert_deallocated(lambda: arpack.IterInv(M_s)): + pass + with assert_deallocated(lambda: arpack.IterOpInv(M_o, None, 0.3)): + pass + with assert_deallocated(lambda: arpack.IterOpInv(M_o, M_o, 0.3)): + pass + +def test_parallel_threads(): + results = [] + v0 = np.random.rand(50) + + def worker(): + x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50)) + w, v = eigs(x, k=3, v0=v0) + results.append(w) + + w, v = eigsh(x, k=3, v0=v0) + results.append(w) + + threads = [threading.Thread(target=worker) for k in range(10)] + for t in threads: + t.start() + for t in threads: + t.join() + + worker() + + for r in results: + assert_allclose(r, results[-1]) + + +def test_reentering(): + # Just some linear operator that calls eigs recursively + def A_matvec(x): + x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50)) + w, v = eigs(x, k=1) + return v / w[0] + A = LinearOperator(matvec=A_matvec, dtype=float, shape=(50, 50)) + + # The Fortran code is not reentrant, so this fails (gracefully, not crashing) + assert_raises(RuntimeError, eigs, A, k=1) + assert_raises(RuntimeError, eigsh, A, k=1) + + +def test_regression_arpackng_1315(): + # Check that issue arpack-ng/#1315 is not present. + # Adapted from arpack-ng/TESTS/bug_1315_single.c + # If this fails, then the installed ARPACK library is faulty. 
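+ # The operator below is diagonal with entries 1..1000, so the nine
+ # eigenvalues of largest magnitude are exactly 992..1000 and the
+ # assertion can compare against w0[-9:] directly.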
+ + for dtype in [np.float32, np.float64]: + np.random.seed(1234) + + w0 = np.arange(1, 1000+1).astype(dtype) + A = diags([w0], [0], shape=(1000, 1000)) + + v0 = np.random.rand(1000).astype(dtype) + w, v = eigs(A, k=9, ncv=2*9+1, which="LM", v0=v0) + + assert_allclose(np.sort(w), np.sort(w0[-9:]), + rtol=1e-4) + + +def test_eigs_for_k_greater(): + # Test eigs() for k beyond limits. + A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)) # sparse + A = generate_matrix(4, sparse=False) + M_dense = np.random.random((4, 4)) + M_sparse = generate_matrix(4, sparse=True) + M_linop = aslinearoperator(M_dense) + eig_tuple1 = eig(A, b=M_dense) + eig_tuple2 = eig(A, b=M_sparse) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + + assert_equal(eigs(A, M=M_dense, k=3), eig_tuple1) + assert_equal(eigs(A, M=M_dense, k=4), eig_tuple1) + assert_equal(eigs(A, M=M_dense, k=5), eig_tuple1) + assert_equal(eigs(A, M=M_sparse, k=5), eig_tuple2) + + # M as LinearOperator + assert_raises(TypeError, eigs, A, M=M_linop, k=3) + + # Test 'A' for different types + assert_raises(TypeError, eigs, aslinearoperator(A), k=3) + assert_raises(TypeError, eigs, A_sparse, k=3) + + +def test_eigsh_for_k_greater(): + # Test eigsh() for k beyond limits. + A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)) # sparse + A = generate_matrix(4, sparse=False) + M_dense = generate_matrix_symmetric(4, pos_definite=True) + M_sparse = generate_matrix_symmetric(4, pos_definite=True, sparse=True) + M_linop = aslinearoperator(M_dense) + eig_tuple1 = eigh(A, b=M_dense) + eig_tuple2 = eigh(A, b=M_sparse) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + + assert_equal(eigsh(A, M=M_dense, k=4), eig_tuple1) + assert_equal(eigsh(A, M=M_dense, k=5), eig_tuple1) + assert_equal(eigsh(A, M=M_sparse, k=5), eig_tuple2) + + # M as LinearOperator + assert_raises(TypeError, eigsh, A, M=M_linop, k=4) + + # Test 'A' for different types + assert_raises(TypeError, eigsh, aslinearoperator(A), k=4) + assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4) + + +def test_real_eigs_real_k_subset(): + np.random.seed(1) + + n = 10 + A = rand(n, n, density=0.5) + A.data *= 2 + A.data -= 1 + + v0 = np.ones(n) + + whichs = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI'] + dtypes = [np.float32, np.float64] + + for which, sigma, dtype in itertools.product(whichs, [None, 0, 5], dtypes): + prev_w = np.array([], dtype=dtype) + eps = np.finfo(dtype).eps + for k in range(1, 9): + w, z = eigs(A.astype(dtype), k=k, which=which, sigma=sigma, + v0=v0.astype(dtype), tol=0) + assert_allclose(np.linalg.norm(A.dot(z) - z * w), 0, atol=np.sqrt(eps)) + + # Check that the set of eigenvalues for `k` is a subset of that for `k+1` + dist = abs(prev_w[:,None] - w).min(axis=1) + assert_allclose(dist, 0, atol=np.sqrt(eps)) + + prev_w = w + + # Check sort order + if sigma is None: + d = w + else: + d = 1 / (w - sigma) + + if which == 'LM': + # ARPACK is systematic for 'LM', but sort order + # appears not well defined for other modes + assert np.all(np.diff(abs(d)) <= 1e-6) diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__init__.py b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6ab5330361a6bcc2a8403f9b3788aedae750d57f --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__init__.py @@ -0,0 +1,16 @@ +""" +Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG) + +LOBPCG is a 
preconditioned eigensolver for large symmetric positive definite +(SPD) generalized eigenproblems. + +Call the function lobpcg - see help for lobpcg.lobpcg. + +""" +from .lobpcg import * + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9d5b2a23687bbb67df074c0303458d76b35f022 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/lobpcg.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/lobpcg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9843b4b349b78acd734a2eadaaeceed2288c19f4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/lobpcg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py new file mode 100644 index 0000000000000000000000000000000000000000..2e4cfc155675fff7b36bda24b7c63cf70456e0db --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py @@ -0,0 +1,1112 @@ +""" +Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG). + +References +---------- +.. [1] A. V. Knyazev (2001), + Toward the Optimal Preconditioned Eigensolver: Locally Optimal + Block Preconditioned Conjugate Gradient Method. + SIAM Journal on Scientific Computing 23, no. 2, + pp. 517-541. :doi:`10.1137/S1064827500366124` + +.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007), + Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX) + in hypre and PETSc. :arxiv:`0705.2626` + +.. [3] A. V. Knyazev's C and MATLAB implementations: + https://github.com/lobpcg/blopex +""" + +import warnings +import numpy as np +from scipy.linalg import (inv, eigh, cho_factor, cho_solve, + cholesky, LinAlgError) +from scipy.sparse.linalg import LinearOperator +from scipy.sparse import issparse + +__all__ = ["lobpcg"] + + +def _report_nonhermitian(M, name): + """ + Report if `M` is not a Hermitian matrix given its type. + """ + from scipy.linalg import norm + + md = M - M.T.conj() + nmd = norm(md, 1) + tol = 10 * np.finfo(M.dtype).eps + tol = max(tol, tol * norm(M, 1)) + if nmd > tol: + warnings.warn( + f"Matrix {name} of the type {M.dtype} is not Hermitian: " + f"condition: {nmd} < {tol} fails.", + UserWarning, stacklevel=4 + ) + +def _as2d(ar): + """ + If the input array is 2D return it, if it is 1D, append a dimension, + making it a column vector. + """ + if ar.ndim == 2: + return ar + else: # Assume 1! + aux = np.asarray(ar) + aux.shape = (ar.shape[0], 1) + return aux + + +def _makeMatMat(m): + if m is None: + return None + elif callable(m): + return lambda v: m(v) + else: + return lambda v: m @ v + + +def _matmul_inplace(x, y, verbosityLevel=0): + """Perform 'np.matmul' in-place if possible. + + If some sufficient conditions for inplace matmul are met, do so. 
+ Otherwise try inplace update and fall back to overwrite if that fails. + """ + if x.flags["CARRAY"] and x.shape[1] == y.shape[1] and x.dtype == y.dtype: + # conditions where we can guarantee that inplace updates will work; + # i.e. x is not a view/slice, x & y have compatible dtypes, and the + # shape of the result of x @ y matches the shape of x. + np.matmul(x, y, out=x) + else: + # ideally, we'd have an exhaustive list of conditions above when + # inplace updates are possible; since we don't, we opportunistically + # try if it works, and fall back to overwriting if necessary + try: + np.matmul(x, y, out=x) + except Exception: + if verbosityLevel: + warnings.warn( + "Inplace update of x = x @ y failed, " + "x needs to be overwritten.", + UserWarning, stacklevel=3 + ) + x = x @ y + return x + + +def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY): + """Changes blockVectorV in-place.""" + YBV = blockVectorBY.T.conj() @ blockVectorV + tmp = cho_solve(factYBY, YBV) + blockVectorV -= blockVectorY @ tmp + + +def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, + verbosityLevel=0): + """in-place B-orthonormalize the given block vector using Cholesky.""" + if blockVectorBV is None: + if B is None: + blockVectorBV = blockVectorV + else: + try: + blockVectorBV = B(blockVectorV) + except Exception as e: + if verbosityLevel: + warnings.warn( + f"Secondary MatMul call failed with error\n" + f"{e}\n", + UserWarning, stacklevel=3 + ) + return None, None, None + if blockVectorBV.shape != blockVectorV.shape: + raise ValueError( + f"The shape {blockVectorV.shape} " + f"of the orthogonalized matrix not preserved\n" + f"and changed to {blockVectorBV.shape} " + f"after multiplying by the secondary matrix.\n" + ) + + VBV = blockVectorV.T.conj() @ blockVectorBV + try: + # VBV is a Cholesky factor from now on... + VBV = cholesky(VBV, overwrite_a=True) + VBV = inv(VBV, overwrite_a=True) + blockVectorV = _matmul_inplace( + blockVectorV, VBV, + verbosityLevel=verbosityLevel + ) + if B is not None: + blockVectorBV = _matmul_inplace( + blockVectorBV, VBV, + verbosityLevel=verbosityLevel + ) + return blockVectorV, blockVectorBV, VBV + except LinAlgError: + if verbosityLevel: + warnings.warn( + "Cholesky has failed.", + UserWarning, stacklevel=3 + ) + return None, None, None + + +def _get_indx(_lambda, num, largest): + """Get `num` indices into `_lambda` depending on `largest` option.""" + ii = np.argsort(_lambda) + if largest: + ii = ii[:-num - 1:-1] + else: + ii = ii[:num] + + return ii + + +def _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel): + if verbosityLevel: + _report_nonhermitian(gramA, "gramA") + _report_nonhermitian(gramB, "gramB") + + +def lobpcg( + A, + X, + B=None, + M=None, + Y=None, + tol=None, + maxiter=None, + largest=True, + verbosityLevel=0, + retLambdaHistory=False, + retResidualNormsHistory=False, + restartControl=20, +): + """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG). + + LOBPCG is a preconditioned eigensolver for large real symmetric and complex + Hermitian definite generalized eigenproblems. + + Parameters + ---------- + A : {sparse matrix, ndarray, LinearOperator, callable object} + The Hermitian linear operator of the problem, usually given by a + sparse matrix. Often called the "stiffness matrix". + X : ndarray, float32 or float64 + Initial approximation to the ``k`` eigenvectors (non-sparse). + If `A` has ``shape=(n,n)`` then `X` must have ``shape=(n,k)``. 
+ B : {sparse matrix, ndarray, LinearOperator, callable object} + Optional. By default ``B = None``, which is equivalent to identity. + The right hand side operator in a generalized eigenproblem if present. + Often called the "mass matrix". Must be Hermitian positive definite. + M : {sparse matrix, ndarray, LinearOperator, callable object} + Optional. By default ``M = None``, which is equivalent to identity. + Preconditioner aiming to accelerate convergence. + Y : ndarray, float32 or float64, default: None + An ``n-by-sizeY`` ndarray of constraints with ``sizeY < n``. + The iterations will be performed in the ``B``-orthogonal complement + of the column-space of `Y`. `Y` must be full rank if present. + tol : scalar, optional + The default is ``tol=n*sqrt(eps)``. + Solver tolerance for the stopping criterion. + maxiter : int, default: 20 + Maximum number of iterations. + largest : bool, default: True + When True, solve for the largest eigenvalues, otherwise the smallest. + verbosityLevel : int, optional + By default ``verbosityLevel=0`` no output. + Controls the solver standard/screen output. + retLambdaHistory : bool, default: False + Whether to return iterative eigenvalue history. + retResidualNormsHistory : bool, default: False + Whether to return iterative history of residual norms. + restartControl : int, optional. + Iterations restart if the residuals jump ``2**restartControl`` times + compared to the smallest recorded in ``retResidualNormsHistory``. + The default is ``restartControl=20``, making the restarts rare for + backward compatibility. + + Returns + ------- + lambda : ndarray of the shape ``(k, )``. + Array of ``k`` approximate eigenvalues. + v : ndarray of the same shape as ``X.shape``. + An array of ``k`` approximate eigenvectors. + lambdaHistory : ndarray, optional. + The eigenvalue history, if `retLambdaHistory` is ``True``. + ResidualNormsHistory : ndarray, optional. + The history of residual norms, if `retResidualNormsHistory` + is ``True``. + + Notes + ----- + The iterative loop runs ``maxit=maxiter`` (20 if ``maxit=None``) + iterations at most and finishes earlier if the tolerance is met. + Breaking backward compatibility with the previous version, LOBPCG + now returns the block of iterative vectors with the best accuracy rather + than the last one iterated, as a cure for possible divergence. + + If ``X.dtype == np.float32`` and user-provided operations/multiplications + by `A`, `B`, and `M` all preserve the ``np.float32`` data type, + all the calculations and the output are in ``np.float32``. + + The size of the iteration history output equals to the number of the best + (limited by `maxit`) iterations plus 3: initial, final, and postprocessing. + + If both `retLambdaHistory` and `retResidualNormsHistory` are ``True``, + the return tuple has the following format + ``(lambda, V, lambda history, residual norms history)``. + + In the following ``n`` denotes the matrix size and ``k`` the number + of required eigenvalues (smallest or largest). + + The LOBPCG code internally solves eigenproblems of the size ``3k`` on every + iteration by calling the dense eigensolver `eigh`, so if ``k`` is not + small enough compared to ``n``, it makes no sense to call the LOBPCG code. + Moreover, if one calls the LOBPCG algorithm for ``5k > n``, it would likely + break internally, so the code calls the standard function `eigh` instead. + It is not that ``n`` should be large for the LOBPCG to work, but rather the + ratio ``n / k`` should be large. 
If you call LOBPCG with ``k=1``
+    and ``n=10``, it works even though ``n`` is small. The method is intended
+    for extremely large ``n / k``.
+
+    The convergence speed depends basically on three factors:
+
+    1. Quality of the initial approximations `X` to the sought eigenvectors.
+       Vectors randomly distributed around the origin work well if no better
+       choice is known.
+
+    2. Relative separation of the desired eigenvalues from the rest
+       of the eigenvalues. One can vary ``k`` to improve the separation.
+
+    3. Proper preconditioning to shrink the spectral spread.
+       For example, a rod vibration test problem (under tests
+       directory) is ill-conditioned for large ``n``, so convergence will be
+       slow, unless efficient preconditioning is used. For this specific
+       problem, a good simple preconditioner function would be a linear solve
+       for `A`, which is easy to code since `A` is tridiagonal.
+
+    References
+    ----------
+    .. [1] A. V. Knyazev (2001),
+           Toward the Optimal Preconditioned Eigensolver: Locally Optimal
+           Block Preconditioned Conjugate Gradient Method.
+           SIAM Journal on Scientific Computing 23, no. 2,
+           pp. 517-541. :doi:`10.1137/S1064827500366124`
+
+    .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
+           (2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
+           (BLOPEX) in hypre and PETSc. :arxiv:`0705.2626`
+
+    .. [3] A. V. Knyazev's C and MATLAB implementations:
+           https://github.com/lobpcg/blopex
+
+    Examples
+    --------
+    Our first example is minimalistic - find the largest eigenvalue of
+    a diagonal matrix by solving the non-generalized eigenvalue problem
+    ``A x = lambda x`` without constraints or preconditioning.
+
+    >>> import numpy as np
+    >>> from scipy.sparse import spdiags
+    >>> from scipy.sparse.linalg import LinearOperator, aslinearoperator
+    >>> from scipy.sparse.linalg import lobpcg
+
+    The square matrix size is
+
+    >>> n = 100
+
+    and its diagonal entries are 1, ..., 100 defined by
+
+    >>> vals = np.arange(1, n + 1).astype(np.int16)
+
+    The first mandatory input parameter in this test is
+    the sparse diagonal matrix `A`
+    of the eigenvalue problem ``A x = lambda x`` to solve.
+
+    >>> A = spdiags(vals, 0, n, n)
+    >>> A = A.astype(np.int16)
+    >>> A.toarray()
+    array([[  1,   0,   0, ...,   0,   0,   0],
+           [  0,   2,   0, ...,   0,   0,   0],
+           [  0,   0,   3, ...,   0,   0,   0],
+           ...,
+           [  0,   0,   0, ...,  98,   0,   0],
+           [  0,   0,   0, ...,   0,  99,   0],
+           [  0,   0,   0, ...,   0,   0, 100]], dtype=int16)
+
+    The second mandatory input parameter `X` is a 2D array whose
+    column dimension determines the number of requested eigenvalues.
+    `X` is an initial guess for the targeted eigenvectors and
+    must have linearly independent columns.
+    If no initial approximations are available, randomly oriented vectors
+    commonly work best, e.g., with components normally distributed
+    around zero or uniformly distributed on the interval [-1, 1].
+    Setting the initial approximations to dtype ``np.float32``
+    forces all iterative values to dtype ``np.float32``, speeding up
+    the run while still allowing accurate eigenvalue computations.
+
+    >>> k = 1
+    >>> rng = np.random.default_rng()
+    >>> X = rng.normal(size=(n, k))
+    >>> X = X.astype(np.float32)
+
+    >>> eigenvalues, _ = lobpcg(A, X, maxiter=60)
+    >>> eigenvalues
+    array([100.])
+    >>> eigenvalues.dtype
+    dtype('float32')
+
+    `lobpcg` needs only access to the matrix product with `A` rather
+    than the matrix itself.
Since the matrix `A` is diagonal in + this example, one can write a function of the matrix product + ``A @ X`` using the diagonal values ``vals`` only, e.g., by + element-wise multiplication with broadcasting in the lambda-function + + >>> A_lambda = lambda X: vals[:, np.newaxis] * X + + or the regular function + + >>> def A_matmat(X): + ... return vals[:, np.newaxis] * X + + and use the handle to one of these callables as an input + + >>> eigenvalues, _ = lobpcg(A_lambda, X, maxiter=60) + >>> eigenvalues + array([100.]) + >>> eigenvalues, _ = lobpcg(A_matmat, X, maxiter=60) + >>> eigenvalues + array([100.]) + + The traditional callable `LinearOperator` is no longer + necessary but still supported as the input to `lobpcg`. + Specifying ``matmat=A_matmat`` explicitly improves performance. + + >>> A_lo = LinearOperator((n, n), matvec=A_matmat, matmat=A_matmat, dtype=np.int16) + >>> eigenvalues, _ = lobpcg(A_lo, X, maxiter=80) + >>> eigenvalues + array([100.]) + + The least efficient callable option is `aslinearoperator`: + + >>> eigenvalues, _ = lobpcg(aslinearoperator(A), X, maxiter=80) + >>> eigenvalues + array([100.]) + + We now switch to computing the three smallest eigenvalues specifying + + >>> k = 3 + >>> X = np.random.default_rng().normal(size=(n, k)) + + and ``largest=False`` parameter + + >>> eigenvalues, _ = lobpcg(A, X, largest=False, maxiter=80) + >>> print(eigenvalues) + [1. 2. 3.] + + The next example illustrates computing 3 smallest eigenvalues of + the same matrix `A` given by the function handle ``A_matmat`` but + with constraints and preconditioning. + + Constraints - an optional input parameter is a 2D array comprising + of column vectors that the eigenvectors must be orthogonal to + + >>> Y = np.eye(n, 3) + + The preconditioner acts as the inverse of `A` in this example, but + in the reduced precision ``np.float32`` even though the initial `X` + and thus all iterates and the output are in full ``np.float64``. + + >>> inv_vals = 1./vals + >>> inv_vals = inv_vals.astype(np.float32) + >>> M = lambda X: inv_vals[:, np.newaxis] * X + + Let us now solve the eigenvalue problem for the matrix `A` first + without preconditioning requesting 80 iterations + + >>> eigenvalues, _ = lobpcg(A_matmat, X, Y=Y, largest=False, maxiter=80) + >>> eigenvalues + array([4., 5., 6.]) + >>> eigenvalues.dtype + dtype('float64') + + With preconditioning we need only 20 iterations from the same `X` + + >>> eigenvalues, _ = lobpcg(A_matmat, X, Y=Y, M=M, largest=False, maxiter=20) + >>> eigenvalues + array([4., 5., 6.]) + + Note that the vectors passed in `Y` are the eigenvectors of the 3 + smallest eigenvalues. The results returned above are orthogonal to those. + + The primary matrix `A` may be indefinite, e.g., after shifting + ``vals`` by 50 from 1, ..., 100 to -49, ..., 50, we still can compute + the 3 smallest or largest eigenvalues. 
+ + >>> vals = vals - 50 + >>> X = rng.normal(size=(n, k)) + >>> eigenvalues, _ = lobpcg(A_matmat, X, largest=False, maxiter=99) + >>> eigenvalues + array([-49., -48., -47.]) + >>> eigenvalues, _ = lobpcg(A_matmat, X, largest=True, maxiter=99) + >>> eigenvalues + array([50., 49., 48.]) + + """ + blockVectorX = X + bestblockVectorX = blockVectorX + blockVectorY = Y + residualTolerance = tol + if maxiter is None: + maxiter = 20 + + bestIterationNumber = maxiter + + sizeY = 0 + if blockVectorY is not None: + if len(blockVectorY.shape) != 2: + warnings.warn( + f"Expected rank-2 array for argument Y, instead got " + f"{len(blockVectorY.shape)}, " + f"so ignore it and use no constraints.", + UserWarning, stacklevel=2 + ) + blockVectorY = None + else: + sizeY = blockVectorY.shape[1] + + # Block size. + if blockVectorX is None: + raise ValueError("The mandatory initial matrix X cannot be None") + if len(blockVectorX.shape) != 2: + raise ValueError("expected rank-2 array for argument X") + + n, sizeX = blockVectorX.shape + + # Data type of iterates, determined by X, must be inexact + if not np.issubdtype(blockVectorX.dtype, np.inexact): + warnings.warn( + f"Data type for argument X is {blockVectorX.dtype}, " + f"which is not inexact, so casted to np.float32.", + UserWarning, stacklevel=2 + ) + blockVectorX = np.asarray(blockVectorX, dtype=np.float32) + + if retLambdaHistory: + lambdaHistory = np.zeros((maxiter + 3, sizeX), + dtype=blockVectorX.dtype) + if retResidualNormsHistory: + residualNormsHistory = np.zeros((maxiter + 3, sizeX), + dtype=blockVectorX.dtype) + + if verbosityLevel: + aux = "Solving " + if B is None: + aux += "standard" + else: + aux += "generalized" + aux += " eigenvalue problem with" + if M is None: + aux += "out" + aux += " preconditioning\n\n" + aux += "matrix size %d\n" % n + aux += "block size %d\n\n" % sizeX + if blockVectorY is None: + aux += "No constraints\n\n" + else: + if sizeY > 1: + aux += "%d constraints\n\n" % sizeY + else: + aux += "%d constraint\n\n" % sizeY + print(aux) + + if (n - sizeY) < (5 * sizeX): + warnings.warn( + f"The problem size {n} minus the constraints size {sizeY} " + f"is too small relative to the block size {sizeX}. " + f"Using a dense eigensolver instead of LOBPCG iterations." + f"No output of the history of the iterations.", + UserWarning, stacklevel=2 + ) + + sizeX = min(sizeX, n) + + if blockVectorY is not None: + raise NotImplementedError( + "The dense eigensolver does not support constraints." + ) + + # Define the closed range of indices of eigenvalues to return. 
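+        # `eigh` with `subset_by_index` returns eigenvalues in ascending
+        # order, so the closed range (n - sizeX, n - 1) selects the sizeX
+        # largest eigenvalues and (0, sizeX - 1) the sizeX smallest.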
+ if largest: + eigvals = (n - sizeX, n - 1) + else: + eigvals = (0, sizeX - 1) + + try: + if isinstance(A, LinearOperator): + A = A(np.eye(n, dtype=int)) + elif callable(A): + A = A(np.eye(n, dtype=int)) + if A.shape != (n, n): + raise ValueError( + f"The shape {A.shape} of the primary matrix\n" + f"defined by a callable object is wrong.\n" + ) + elif issparse(A): + A = A.toarray() + else: + A = np.asarray(A) + except Exception as e: + raise Exception( + f"Primary MatMul call failed with error\n" + f"{e}\n") + + if B is not None: + try: + if isinstance(B, LinearOperator): + B = B(np.eye(n, dtype=int)) + elif callable(B): + B = B(np.eye(n, dtype=int)) + if B.shape != (n, n): + raise ValueError( + f"The shape {B.shape} of the secondary matrix\n" + f"defined by a callable object is wrong.\n" + ) + elif issparse(B): + B = B.toarray() + else: + B = np.asarray(B) + except Exception as e: + raise Exception( + f"Secondary MatMul call failed with error\n" + f"{e}\n") + + try: + vals, vecs = eigh(A, + B, + subset_by_index=eigvals, + check_finite=False) + if largest: + # Reverse order to be compatible with eigs() in 'LM' mode. + vals = vals[::-1] + vecs = vecs[:, ::-1] + + return vals, vecs + except Exception as e: + raise Exception( + f"Dense eigensolver failed with error\n" + f"{e}\n" + ) + + if (residualTolerance is None) or (residualTolerance <= 0.0): + residualTolerance = np.sqrt(np.finfo(blockVectorX.dtype).eps) * n + + A = _makeMatMat(A) + B = _makeMatMat(B) + M = _makeMatMat(M) + + # Apply constraints to X. + if blockVectorY is not None: + + if B is not None: + blockVectorBY = B(blockVectorY) + if blockVectorBY.shape != blockVectorY.shape: + raise ValueError( + f"The shape {blockVectorY.shape} " + f"of the constraint not preserved\n" + f"and changed to {blockVectorBY.shape} " + f"after multiplying by the secondary matrix.\n" + ) + else: + blockVectorBY = blockVectorY + + # gramYBY is a dense array. + gramYBY = blockVectorY.T.conj() @ blockVectorBY + try: + # gramYBY is a Cholesky factor from now on... + gramYBY = cho_factor(gramYBY, overwrite_a=True) + except LinAlgError as e: + raise ValueError("Linearly dependent constraints") from e + + _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY) + + ## + # B-orthonormalize X. + blockVectorX, blockVectorBX, _ = _b_orthonormalize( + B, blockVectorX, verbosityLevel=verbosityLevel) + if blockVectorX is None: + raise ValueError("Linearly dependent initial approximations") + + ## + # Compute the initial Ritz vectors: solve the eigenproblem. + blockVectorAX = A(blockVectorX) + if blockVectorAX.shape != blockVectorX.shape: + raise ValueError( + f"The shape {blockVectorX.shape} " + f"of the initial approximations not preserved\n" + f"and changed to {blockVectorAX.shape} " + f"after multiplying by the primary matrix.\n" + ) + + gramXAX = blockVectorX.T.conj() @ blockVectorAX + + _lambda, eigBlockVector = eigh(gramXAX, check_finite=False) + ii = _get_indx(_lambda, sizeX, largest) + _lambda = _lambda[ii] + if retLambdaHistory: + lambdaHistory[0, :] = _lambda + + eigBlockVector = np.asarray(eigBlockVector[:, ii]) + blockVectorX = _matmul_inplace( + blockVectorX, eigBlockVector, + verbosityLevel=verbosityLevel + ) + blockVectorAX = _matmul_inplace( + blockVectorAX, eigBlockVector, + verbosityLevel=verbosityLevel + ) + if B is not None: + blockVectorBX = _matmul_inplace( + blockVectorBX, eigBlockVector, + verbosityLevel=verbosityLevel + ) + + ## + # Active index set. + activeMask = np.ones((sizeX,), dtype=bool) + + ## + # Main iteration loop. 
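+    # Each pass of the loop below performs one Rayleigh-Ritz step on the
+    # trial subspace spanned by the current iterates X, the (preconditioned)
+    # residuals R, and the conjugate directions P carried over from the
+    # previous iteration, i.e. a dense eigenproblem of size at most 3 * sizeX.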
+ + blockVectorP = None # set during iteration + blockVectorAP = None + blockVectorBP = None + + smallestResidualNorm = np.abs(np.finfo(blockVectorX.dtype).max) + + iterationNumber = -1 + restart = True + forcedRestart = False + explicitGramFlag = False + while iterationNumber < maxiter: + iterationNumber += 1 + + if B is not None: + aux = blockVectorBX * _lambda[np.newaxis, :] + else: + aux = blockVectorX * _lambda[np.newaxis, :] + + blockVectorR = blockVectorAX - aux + + aux = np.sum(blockVectorR.conj() * blockVectorR, 0) + residualNorms = np.sqrt(np.abs(aux)) + if retResidualNormsHistory: + residualNormsHistory[iterationNumber, :] = residualNorms + residualNorm = np.sum(np.abs(residualNorms)) / sizeX + + if residualNorm < smallestResidualNorm: + smallestResidualNorm = residualNorm + bestIterationNumber = iterationNumber + bestblockVectorX = blockVectorX + elif residualNorm > 2**restartControl * smallestResidualNorm: + forcedRestart = True + blockVectorAX = A(blockVectorX) + if blockVectorAX.shape != blockVectorX.shape: + raise ValueError( + f"The shape {blockVectorX.shape} " + f"of the restarted iterate not preserved\n" + f"and changed to {blockVectorAX.shape} " + f"after multiplying by the primary matrix.\n" + ) + if B is not None: + blockVectorBX = B(blockVectorX) + if blockVectorBX.shape != blockVectorX.shape: + raise ValueError( + f"The shape {blockVectorX.shape} " + f"of the restarted iterate not preserved\n" + f"and changed to {blockVectorBX.shape} " + f"after multiplying by the secondary matrix.\n" + ) + + ii = np.where(residualNorms > residualTolerance, True, False) + activeMask = activeMask & ii + currentBlockSize = activeMask.sum() + + if verbosityLevel: + print(f"iteration {iterationNumber}") + print(f"current block size: {currentBlockSize}") + print(f"eigenvalue(s):\n{_lambda}") + print(f"residual norm(s):\n{residualNorms}") + + if currentBlockSize == 0: + break + + activeBlockVectorR = _as2d(blockVectorR[:, activeMask]) + + if iterationNumber > 0: + activeBlockVectorP = _as2d(blockVectorP[:, activeMask]) + activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask]) + if B is not None: + activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask]) + + if M is not None: + # Apply preconditioner T to the active residuals. + activeBlockVectorR = M(activeBlockVectorR) + + ## + # Apply constraints to the preconditioned residuals. + if blockVectorY is not None: + _applyConstraints(activeBlockVectorR, + gramYBY, + blockVectorBY, + blockVectorY) + + ## + # B-orthogonalize the preconditioned residuals to X. + if B is not None: + activeBlockVectorR = activeBlockVectorR - ( + blockVectorX @ + (blockVectorBX.T.conj() @ activeBlockVectorR) + ) + else: + activeBlockVectorR = activeBlockVectorR - ( + blockVectorX @ + (blockVectorX.T.conj() @ activeBlockVectorR) + ) + + ## + # B-orthonormalize the preconditioned residuals. 
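+        # Orthonormalization keeps the Gram matrices assembled below well
+        # conditioned; `_b_orthonormalize` returns (None, None, None) if its
+        # Cholesky factorization fails, which is treated as a breakdown here.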
+ aux = _b_orthonormalize( + B, activeBlockVectorR, verbosityLevel=verbosityLevel) + activeBlockVectorR, activeBlockVectorBR, _ = aux + + if activeBlockVectorR is None: + warnings.warn( + f"Failed at iteration {iterationNumber} with accuracies " + f"{residualNorms}\n not reaching the requested " + f"tolerance {residualTolerance}.", + UserWarning, stacklevel=2 + ) + break + activeBlockVectorAR = A(activeBlockVectorR) + + if iterationNumber > 0: + if B is not None: + aux = _b_orthonormalize( + B, activeBlockVectorP, activeBlockVectorBP, + verbosityLevel=verbosityLevel + ) + activeBlockVectorP, activeBlockVectorBP, invR = aux + else: + aux = _b_orthonormalize(B, activeBlockVectorP, + verbosityLevel=verbosityLevel) + activeBlockVectorP, _, invR = aux + # Function _b_orthonormalize returns None if Cholesky fails + if activeBlockVectorP is not None: + activeBlockVectorAP = _matmul_inplace( + activeBlockVectorAP, invR, + verbosityLevel=verbosityLevel + ) + restart = forcedRestart + else: + restart = True + + ## + # Perform the Rayleigh Ritz Procedure: + # Compute symmetric Gram matrices: + + if activeBlockVectorAR.dtype == "float32": + myeps = 1 + else: + myeps = np.sqrt(np.finfo(activeBlockVectorR.dtype).eps) + + if residualNorms.max() > myeps and not explicitGramFlag: + explicitGramFlag = False + else: + # Once explicitGramFlag, forever explicitGramFlag. + explicitGramFlag = True + + # Shared memory assignments to simplify the code + if B is None: + blockVectorBX = blockVectorX + activeBlockVectorBR = activeBlockVectorR + if not restart: + activeBlockVectorBP = activeBlockVectorP + + # Common submatrices: + gramXAR = np.dot(blockVectorX.T.conj(), activeBlockVectorAR) + gramRAR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR) + + gramDtype = activeBlockVectorAR.dtype + if explicitGramFlag: + gramRAR = (gramRAR + gramRAR.T.conj()) / 2 + gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX) + gramXAX = (gramXAX + gramXAX.T.conj()) / 2 + gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX) + gramRBR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBR) + gramXBR = np.dot(blockVectorX.T.conj(), activeBlockVectorBR) + else: + gramXAX = np.diag(_lambda).astype(gramDtype) + gramXBX = np.eye(sizeX, dtype=gramDtype) + gramRBR = np.eye(currentBlockSize, dtype=gramDtype) + gramXBR = np.zeros((sizeX, currentBlockSize), dtype=gramDtype) + + if not restart: + gramXAP = np.dot(blockVectorX.T.conj(), activeBlockVectorAP) + gramRAP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP) + gramPAP = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP) + gramXBP = np.dot(blockVectorX.T.conj(), activeBlockVectorBP) + gramRBP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP) + if explicitGramFlag: + gramPAP = (gramPAP + gramPAP.T.conj()) / 2 + gramPBP = np.dot(activeBlockVectorP.T.conj(), + activeBlockVectorBP) + else: + gramPBP = np.eye(currentBlockSize, dtype=gramDtype) + + gramA = np.block( + [ + [gramXAX, gramXAR, gramXAP], + [gramXAR.T.conj(), gramRAR, gramRAP], + [gramXAP.T.conj(), gramRAP.T.conj(), gramPAP], + ] + ) + gramB = np.block( + [ + [gramXBX, gramXBR, gramXBP], + [gramXBR.T.conj(), gramRBR, gramRBP], + [gramXBP.T.conj(), gramRBP.T.conj(), gramPBP], + ] + ) + + _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel) + + try: + _lambda, eigBlockVector = eigh(gramA, + gramB, + check_finite=False) + except LinAlgError as e: + # raise ValueError("eigh failed in lobpcg iterations") from e + if verbosityLevel: + warnings.warn( + f"eigh failed at iteration 
{iterationNumber} \n" + f"with error {e} causing a restart.\n", + UserWarning, stacklevel=2 + ) + # try again after dropping the direction vectors P from RR + restart = True + + if restart: + gramA = np.block([[gramXAX, gramXAR], [gramXAR.T.conj(), gramRAR]]) + gramB = np.block([[gramXBX, gramXBR], [gramXBR.T.conj(), gramRBR]]) + + _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel) + + try: + _lambda, eigBlockVector = eigh(gramA, + gramB, + check_finite=False) + except LinAlgError as e: + # raise ValueError("eigh failed in lobpcg iterations") from e + warnings.warn( + f"eigh failed at iteration {iterationNumber} with error\n" + f"{e}\n", + UserWarning, stacklevel=2 + ) + break + + ii = _get_indx(_lambda, sizeX, largest) + _lambda = _lambda[ii] + eigBlockVector = eigBlockVector[:, ii] + if retLambdaHistory: + lambdaHistory[iterationNumber + 1, :] = _lambda + + # Compute Ritz vectors. + if B is not None: + if not restart: + eigBlockVectorX = eigBlockVector[:sizeX] + eigBlockVectorR = eigBlockVector[sizeX: + sizeX + currentBlockSize] + eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:] + + pp = np.dot(activeBlockVectorR, eigBlockVectorR) + pp += np.dot(activeBlockVectorP, eigBlockVectorP) + + app = np.dot(activeBlockVectorAR, eigBlockVectorR) + app += np.dot(activeBlockVectorAP, eigBlockVectorP) + + bpp = np.dot(activeBlockVectorBR, eigBlockVectorR) + bpp += np.dot(activeBlockVectorBP, eigBlockVectorP) + else: + eigBlockVectorX = eigBlockVector[:sizeX] + eigBlockVectorR = eigBlockVector[sizeX:] + + pp = np.dot(activeBlockVectorR, eigBlockVectorR) + app = np.dot(activeBlockVectorAR, eigBlockVectorR) + bpp = np.dot(activeBlockVectorBR, eigBlockVectorR) + + blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp + blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app + blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp + + blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp + + else: + if not restart: + eigBlockVectorX = eigBlockVector[:sizeX] + eigBlockVectorR = eigBlockVector[sizeX: + sizeX + currentBlockSize] + eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:] + + pp = np.dot(activeBlockVectorR, eigBlockVectorR) + pp += np.dot(activeBlockVectorP, eigBlockVectorP) + + app = np.dot(activeBlockVectorAR, eigBlockVectorR) + app += np.dot(activeBlockVectorAP, eigBlockVectorP) + else: + eigBlockVectorX = eigBlockVector[:sizeX] + eigBlockVectorR = eigBlockVector[sizeX:] + + pp = np.dot(activeBlockVectorR, eigBlockVectorR) + app = np.dot(activeBlockVectorAR, eigBlockVectorR) + + blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp + blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app + + blockVectorP, blockVectorAP = pp, app + + if B is not None: + aux = blockVectorBX * _lambda[np.newaxis, :] + else: + aux = blockVectorX * _lambda[np.newaxis, :] + + blockVectorR = blockVectorAX - aux + + aux = np.sum(blockVectorR.conj() * blockVectorR, 0) + residualNorms = np.sqrt(np.abs(aux)) + # Use old lambda in case of early loop exit. 
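+    # These are the residual norms of the last iterate, recorded before the
+    # final "exact" Rayleigh-Ritz postprocessing performed below.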
+ if retLambdaHistory: + lambdaHistory[iterationNumber + 1, :] = _lambda + if retResidualNormsHistory: + residualNormsHistory[iterationNumber + 1, :] = residualNorms + residualNorm = np.sum(np.abs(residualNorms)) / sizeX + if residualNorm < smallestResidualNorm: + smallestResidualNorm = residualNorm + bestIterationNumber = iterationNumber + 1 + bestblockVectorX = blockVectorX + + if np.max(np.abs(residualNorms)) > residualTolerance: + warnings.warn( + f"Exited at iteration {iterationNumber} with accuracies \n" + f"{residualNorms}\n" + f"not reaching the requested tolerance {residualTolerance}.\n" + f"Use iteration {bestIterationNumber} instead with accuracy \n" + f"{smallestResidualNorm}.\n", + UserWarning, stacklevel=2 + ) + + if verbosityLevel: + print(f"Final iterative eigenvalue(s):\n{_lambda}") + print(f"Final iterative residual norm(s):\n{residualNorms}") + + blockVectorX = bestblockVectorX + # Making eigenvectors "exactly" satisfy the blockVectorY constrains + if blockVectorY is not None: + _applyConstraints(blockVectorX, + gramYBY, + blockVectorBY, + blockVectorY) + + # Making eigenvectors "exactly" othonormalized by final "exact" RR + blockVectorAX = A(blockVectorX) + if blockVectorAX.shape != blockVectorX.shape: + raise ValueError( + f"The shape {blockVectorX.shape} " + f"of the postprocessing iterate not preserved\n" + f"and changed to {blockVectorAX.shape} " + f"after multiplying by the primary matrix.\n" + ) + gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX) + + blockVectorBX = blockVectorX + if B is not None: + blockVectorBX = B(blockVectorX) + if blockVectorBX.shape != blockVectorX.shape: + raise ValueError( + f"The shape {blockVectorX.shape} " + f"of the postprocessing iterate not preserved\n" + f"and changed to {blockVectorBX.shape} " + f"after multiplying by the secondary matrix.\n" + ) + + gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX) + _handle_gramA_gramB_verbosity(gramXAX, gramXBX, verbosityLevel) + gramXAX = (gramXAX + gramXAX.T.conj()) / 2 + gramXBX = (gramXBX + gramXBX.T.conj()) / 2 + try: + _lambda, eigBlockVector = eigh(gramXAX, + gramXBX, + check_finite=False) + except LinAlgError as e: + raise ValueError("eigh has failed in lobpcg postprocessing") from e + + ii = _get_indx(_lambda, sizeX, largest) + _lambda = _lambda[ii] + eigBlockVector = np.asarray(eigBlockVector[:, ii]) + + blockVectorX = np.dot(blockVectorX, eigBlockVector) + blockVectorAX = np.dot(blockVectorAX, eigBlockVector) + + if B is not None: + blockVectorBX = np.dot(blockVectorBX, eigBlockVector) + aux = blockVectorBX * _lambda[np.newaxis, :] + else: + aux = blockVectorX * _lambda[np.newaxis, :] + + blockVectorR = blockVectorAX - aux + + aux = np.sum(blockVectorR.conj() * blockVectorR, 0) + residualNorms = np.sqrt(np.abs(aux)) + + if retLambdaHistory: + lambdaHistory[bestIterationNumber + 1, :] = _lambda + if retResidualNormsHistory: + residualNormsHistory[bestIterationNumber + 1, :] = residualNorms + + if retLambdaHistory: + lambdaHistory = lambdaHistory[ + : bestIterationNumber + 2, :] + if retResidualNormsHistory: + residualNormsHistory = residualNormsHistory[ + : bestIterationNumber + 2, :] + + if np.max(np.abs(residualNorms)) > residualTolerance: + warnings.warn( + f"Exited postprocessing with accuracies \n" + f"{residualNorms}\n" + f"not reaching the requested tolerance {residualTolerance}.", + UserWarning, stacklevel=2 + ) + + if verbosityLevel: + print(f"Final postprocessing eigenvalue(s):\n{_lambda}") + print(f"Final residual norm(s):\n{residualNorms}") + + if 
retLambdaHistory: + lambdaHistory = np.vsplit(lambdaHistory, np.shape(lambdaHistory)[0]) + lambdaHistory = [np.squeeze(i) for i in lambdaHistory] + if retResidualNormsHistory: + residualNormsHistory = np.vsplit(residualNormsHistory, + np.shape(residualNormsHistory)[0]) + residualNormsHistory = [np.squeeze(i) for i in residualNormsHistory] + + if retLambdaHistory: + if retResidualNormsHistory: + return _lambda, blockVectorX, lambdaHistory, residualNormsHistory + else: + return _lambda, blockVectorX, lambdaHistory + else: + if retResidualNormsHistory: + return _lambda, blockVectorX, residualNormsHistory + else: + return _lambda, blockVectorX diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b9470799784728f309eacb5809f696ce0114470 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/test_lobpcg.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/test_lobpcg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6eb5c63d63a0f6538d20d0cef64d8d80f1303c88 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/test_lobpcg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py new file mode 100644 index 0000000000000000000000000000000000000000..4b060d77d1f7531b50b86e79a9132ef2ef7d506d --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py @@ -0,0 +1,645 @@ +""" Test functions for the sparse.linalg._eigen.lobpcg module +""" + +import itertools +import platform +import sys +import pytest +import numpy as np +from numpy import ones, r_, diag +from numpy.testing import (assert_almost_equal, assert_equal, + assert_allclose, assert_array_less) + +from scipy import sparse +from scipy.linalg import eig, eigh, toeplitz, orth +from scipy.sparse import spdiags, diags, eye, csr_matrix +from scipy.sparse.linalg import eigs, LinearOperator +from scipy.sparse.linalg._eigen.lobpcg import lobpcg +from scipy.sparse.linalg._eigen.lobpcg.lobpcg import _b_orthonormalize +from scipy._lib._util import np_long, np_ulong + +_IS_32BIT = (sys.maxsize < 2**32) + +INT_DTYPES = {np.intc, np_long, np.longlong, np.uintc, np_ulong, np.ulonglong} +# np.half is unsupported on many test systems so excluded +REAL_DTYPES = {np.float32, np.float64, np.longdouble} +COMPLEX_DTYPES = {np.complex64, np.complex128, np.clongdouble} +# use sorted list to ensure fixed order of tests +VDTYPES = sorted(REAL_DTYPES ^ COMPLEX_DTYPES, key=str) +MDTYPES = sorted(INT_DTYPES ^ REAL_DTYPES ^ COMPLEX_DTYPES, key=str) + + +def sign_align(A, B): + """Align signs of columns 
of A match those of B: column-wise remove + sign of A by multiplying with its sign then multiply in sign of B. + """ + return np.array([col_A * np.sign(col_A[0]) * np.sign(col_B[0]) + for col_A, col_B in zip(A.T, B.T)]).T + +def ElasticRod(n): + """Build the matrices for the generalized eigenvalue problem of the + fixed-free elastic rod vibration model. + """ + L = 1.0 + le = L/n + rho = 7.85e3 + S = 1.e-4 + E = 2.1e11 + mass = rho*S*le/6. + k = E*S/le + A = k*(diag(r_[2.*ones(n-1), 1])-diag(ones(n-1), 1)-diag(ones(n-1), -1)) + B = mass*(diag(r_[4.*ones(n-1), 2])+diag(ones(n-1), 1)+diag(ones(n-1), -1)) + return A, B + + +def MikotaPair(n): + """Build a pair of full diagonal matrices for the generalized eigenvalue + problem. The Mikota pair acts as a nice test since the eigenvalues are the + squares of the integers n, n=1,2,... + """ + x = np.arange(1, n+1) + B = diag(1./x) + y = np.arange(n-1, 0, -1) + z = np.arange(2*n-1, 0, -2) + A = diag(z)-diag(y, -1)-diag(y, 1) + return A, B + + +def compare_solutions(A, B, m): + """Check eig vs. lobpcg consistency. + """ + n = A.shape[0] + rnd = np.random.RandomState(0) + V = rnd.random((n, m)) + X = orth(V) + eigvals, _ = lobpcg(A, X, B=B, tol=1e-2, maxiter=50, largest=False) + eigvals.sort() + w, _ = eig(A, b=B) + w.sort() + assert_almost_equal(w[:int(m/2)], eigvals[:int(m/2)], decimal=2) + + +def test_Small(): + A, B = ElasticRod(10) + with pytest.warns(UserWarning, match="The problem size"): + compare_solutions(A, B, 10) + A, B = MikotaPair(10) + with pytest.warns(UserWarning, match="The problem size"): + compare_solutions(A, B, 10) + + +def test_ElasticRod(): + A, B = ElasticRod(20) + msg = "Exited at iteration.*|Exited postprocessing with accuracies.*" + with pytest.warns(UserWarning, match=msg): + compare_solutions(A, B, 2) + + +def test_MikotaPair(): + A, B = MikotaPair(20) + compare_solutions(A, B, 2) + + +@pytest.mark.parametrize("n", [50]) +@pytest.mark.parametrize("m", [1, 2, 10]) +@pytest.mark.parametrize("Vdtype", sorted(REAL_DTYPES, key=str)) +@pytest.mark.parametrize("Bdtype", sorted(REAL_DTYPES, key=str)) +@pytest.mark.parametrize("BVdtype", sorted(REAL_DTYPES, key=str)) +def test_b_orthonormalize(n, m, Vdtype, Bdtype, BVdtype): + """Test B-orthonormalization by Cholesky with callable 'B'. + The function '_b_orthonormalize' is key in LOBPCG but may + lead to numerical instabilities. The input vectors are often + badly scaled, so the function needs scale-invariant Cholesky; + see https://netlib.org/lapack/lawnspdf/lawn14.pdf. + """ + rnd = np.random.RandomState(0) + X = rnd.standard_normal((n, m)).astype(Vdtype) + Xcopy = np.copy(X) + vals = np.arange(1, n+1, dtype=float) + B = diags([vals], [0], (n, n)).astype(Bdtype) + BX = B @ X + BX = BX.astype(BVdtype) + dtype = min(X.dtype, B.dtype, BX.dtype) + # np.longdouble tol cannot be achieved on most systems + atol = m * n * max(np.finfo(dtype).eps, np.finfo(np.float64).eps) + + Xo, BXo, _ = _b_orthonormalize(lambda v: B @ v, X, BX) + # Check in-place. + assert_equal(X, Xo) + assert_equal(id(X), id(Xo)) + assert_equal(BX, BXo) + assert_equal(id(BX), id(BXo)) + # Check BXo. + assert_allclose(B @ Xo, BXo, atol=atol, rtol=atol) + # Check B-orthonormality + assert_allclose(Xo.T.conj() @ B @ Xo, np.identity(m), + atol=atol, rtol=atol) + # Repeat without BX in outputs + X = np.copy(Xcopy) + Xo1, BXo1, _ = _b_orthonormalize(lambda v: B @ v, X) + assert_allclose(Xo, Xo1, atol=atol, rtol=atol) + assert_allclose(BXo, BXo1, atol=atol, rtol=atol) + # Check in-place. 
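+    # The `id` comparisons below confirm that `_b_orthonormalize` updated
+    # the caller's arrays in place rather than allocating new ones.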
+    assert_equal(X, Xo1)
+    assert_equal(id(X), id(Xo1))
+    # Check BXo1.
+    assert_allclose(B @ Xo1, BXo1, atol=atol, rtol=atol)
+
+    # Introduce column-scaling in X.
+    scaling = 1.0 / np.geomspace(10, 1e10, num=m)
+    X = Xcopy * scaling
+    X = X.astype(Vdtype)
+    BX = B @ X
+    BX = BX.astype(BVdtype)
+    # Check scaling-invariance of Cholesky-based orthonormalization.
+    Xo1, BXo1, _ = _b_orthonormalize(lambda v: B @ v, X, BX)
+    # The output should be the same, up to the signs of the columns.
+    Xo1 = sign_align(Xo1, Xo)
+    assert_allclose(Xo, Xo1, atol=atol, rtol=atol)
+    BXo1 = sign_align(BXo1, BXo)
+    assert_allclose(BXo, BXo1, atol=atol, rtol=atol)
+
+
+@pytest.mark.filterwarnings("ignore:Exited at iteration 0")
+@pytest.mark.filterwarnings("ignore:Exited postprocessing")
+def test_nonhermitian_warning(capsys):
+    """Check the warning of a Ritz matrix being not Hermitian
+    by feeding a non-Hermitian input matrix.
+    Also check stdout (since verbosityLevel=1) and that stderr stays empty.
+    """
+    n = 10
+    X = np.arange(n * 2).reshape(n, 2).astype(np.float32)
+    A = np.arange(n * n).reshape(n, n).astype(np.float32)
+    with pytest.warns(UserWarning, match="Matrix gramA"):
+        _, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0)
+    out, err = capsys.readouterr()  # Capture output
+    assert out.startswith("Solving standard eigenvalue")  # Test stdout
+    assert err == ''  # Test empty stderr
+    # Make the matrix symmetric and the UserWarning disappears.
+    A += A.T
+    _, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0)
+    out, err = capsys.readouterr()  # Capture output
+    assert out.startswith("Solving standard eigenvalue")  # Test stdout
+    assert err == ''  # Test empty stderr
+
+
+def test_regression():
+    """Check the eigenvalue of the identity matrix is one.
+    """
+    # https://mail.python.org/pipermail/scipy-user/2010-October/026944.html
+    n = 10
+    X = np.ones((n, 1))
+    A = np.identity(n)
+    w, _ = lobpcg(A, X)
+    assert_allclose(w, [1])
+
+
+@pytest.mark.filterwarnings("ignore:The problem size")
+@pytest.mark.parametrize('n, m, m_excluded', [(30, 4, 3), (4, 2, 0)])
+def test_diagonal(n, m, m_excluded):
+    """Test ``m - m_excluded`` eigenvalues and eigenvectors of
+    diagonal matrices of size ``n``, varying matrix formats:
+    dense array, sparse matrix, and ``LinearOperator`` for both
+    matrices in the generalized eigenvalue problem ``Av = cBv``
+    and for the preconditioner.
+    """
+    rnd = np.random.RandomState(0)
+
+    # Define the generalized eigenvalue problem Av = cBv
+    # where (c, v) is a generalized eigenpair,
+    # A is the diagonal matrix whose entries are 1, ..., n,
+    # B is the identity matrix.
+    vals = np.arange(1, n+1, dtype=float)
+    A_s = diags([vals], [0], (n, n))
+    A_a = A_s.toarray()
+
+    def A_f(x):
+        return A_s @ x
+
+    A_lo = LinearOperator(matvec=A_f,
+                          matmat=A_f,
+                          shape=(n, n), dtype=float)
+
+    B_a = eye(n)
+    B_s = csr_matrix(B_a)
+
+    def B_f(x):
+        return B_a @ x
+
+    B_lo = LinearOperator(matvec=B_f,
+                          matmat=B_f,
+                          shape=(n, n), dtype=float)
+
+    # Let the preconditioner M be the inverse of A.
+    M_s = diags([1./vals], [0], (n, n))
+    M_a = M_s.toarray()
+
+    def M_f(x):
+        return M_s @ x
+
+    M_lo = LinearOperator(matvec=M_f,
+                          matmat=M_f,
+                          shape=(n, n), dtype=float)
+
+    # Pick random initial vectors.
+    X = rnd.normal(size=(n, m))
+
+    # Require that the returned eigenvectors be in the orthogonal complement
+    # of the first few standard basis vectors.
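+    # For the diagonal A with entries 1, ..., n, those basis vectors are
+    # exactly the eigenvectors of the m_excluded smallest eigenvalues, so
+    # constraining against Y makes 1 + m_excluded the smallest eigenvalue
+    # that lobpcg can return here.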
+ if m_excluded > 0: + Y = np.eye(n, m_excluded) + else: + Y = None + + for A in [A_a, A_s, A_lo]: + for B in [B_a, B_s, B_lo]: + for M in [M_a, M_s, M_lo]: + eigvals, vecs = lobpcg(A, X, B, M=M, Y=Y, + maxiter=40, largest=False) + + assert_allclose(eigvals, np.arange(1+m_excluded, + 1+m_excluded+m)) + _check_eigen(A, eigvals, vecs, rtol=1e-3, atol=1e-3) + + +def _check_eigen(M, w, V, rtol=1e-8, atol=1e-14): + """Check if the eigenvalue residual is small. + """ + mult_wV = np.multiply(w, V) + dot_MV = M.dot(V) + assert_allclose(mult_wV, dot_MV, rtol=rtol, atol=atol) + + +def _check_fiedler(n, p): + """Check the Fiedler vector computation. + """ + # This is not necessarily the recommended way to find the Fiedler vector. + col = np.zeros(n) + col[1] = 1 + A = toeplitz(col) + D = np.diag(A.sum(axis=1)) + L = D - A + # Compute the full eigendecomposition using tricks, e.g. + # http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf + tmp = np.pi * np.arange(n) / n + analytic_w = 2 * (1 - np.cos(tmp)) + analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp)) + _check_eigen(L, analytic_w, analytic_V) + # Compute the full eigendecomposition using eigh. + eigh_w, eigh_V = eigh(L) + _check_eigen(L, eigh_w, eigh_V) + # Check that the first eigenvalue is near zero and that the rest agree. + assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14) + assert_allclose(eigh_w[1:], analytic_w[1:]) + + # Check small lobpcg eigenvalues. + X = analytic_V[:, :p] + lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False) + assert_equal(lobpcg_w.shape, (p,)) + assert_equal(lobpcg_V.shape, (n, p)) + _check_eigen(L, lobpcg_w, lobpcg_V) + assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14) + assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p]) + + # Check large lobpcg eigenvalues. + X = analytic_V[:, -p:] + lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True) + assert_equal(lobpcg_w.shape, (p,)) + assert_equal(lobpcg_V.shape, (n, p)) + _check_eigen(L, lobpcg_w, lobpcg_V) + assert_allclose(np.sort(lobpcg_w), analytic_w[-p:]) + + # Look for the Fiedler vector using good but not exactly correct guesses. + fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2))) + X = np.vstack((np.ones(n), fiedler_guess)).T + lobpcg_w, _ = lobpcg(L, X, largest=False) + # Mathematically, the smaller eigenvalue should be zero + # and the larger should be the algebraic connectivity. + lobpcg_w = np.sort(lobpcg_w) + assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14) + + +def test_fiedler_small_8(): + """Check the dense workaround path for small matrices. + """ + # This triggers the dense path because 8 < 2*5. + with pytest.warns(UserWarning, match="The problem size"): + _check_fiedler(8, 2) + + +def test_fiedler_large_12(): + """Check the dense workaround path avoided for non-small matrices. + """ + # This does not trigger the dense path, because 2*5 <= 12. + _check_fiedler(12, 2) + + +@pytest.mark.filterwarnings("ignore:Failed at iteration") +@pytest.mark.filterwarnings("ignore:Exited at iteration") +@pytest.mark.filterwarnings("ignore:Exited postprocessing") +def test_failure_to_run_iterations(): + """Check that the code exits gracefully without breaking. Issue #10974. + The code may or not issue a warning, filtered out. Issue #15935, #17954. 
+    """
+    rnd = np.random.RandomState(0)
+    X = rnd.standard_normal((100, 10))
+    A = X @ X.T
+    Q = rnd.standard_normal((X.shape[0], 4))
+    eigenvalues, _ = lobpcg(A, Q, maxiter=40, tol=1e-12)
+    assert np.max(eigenvalues) > 0
+
+
+def test_failure_to_run_iterations_nonsymmetric():
+    """Check that the code exits gracefully without breaking
+    if the matrix is not symmetric.
+    """
+    A = np.zeros((10, 10))
+    A[0, 1] = 1
+    Q = np.ones((10, 1))
+    msg = "Exited at iteration 2|Exited postprocessing with accuracies.*"
+    with pytest.warns(UserWarning, match=msg):
+        eigenvalues, _ = lobpcg(A, Q, maxiter=20)
+    assert np.max(eigenvalues) > 0
+
+
+@pytest.mark.filterwarnings("ignore:The problem size")
+def test_hermitian():
+    """Check complex-valued Hermitian cases.
+    """
+    rnd = np.random.RandomState(0)
+
+    sizes = [3, 12]
+    ks = [1, 2]
+    gens = [True, False]
+
+    for s, k, gen, dh, dx, db in (
+            itertools.product(sizes, ks, gens, gens, gens, gens)
+    ):
+        H = rnd.random((s, s)) + 1.j * rnd.random((s, s))
+        H = 10 * np.eye(s) + H + H.T.conj()
+        H = H.astype(np.complex128) if dh else H.astype(np.complex64)
+
+        X = rnd.standard_normal((s, k))
+        X = X + 1.j * rnd.standard_normal((s, k))
+        X = X.astype(np.complex128) if dx else X.astype(np.complex64)
+
+        if not gen:
+            B = np.eye(s)
+            w, v = lobpcg(H, X, maxiter=99, verbosityLevel=0)
+            # Also test mixing complex H with real B.
+            wb, _ = lobpcg(H, X, B, maxiter=99, verbosityLevel=0)
+            assert_allclose(w, wb, rtol=1e-6)
+            w0, _ = eigh(H)
+        else:
+            B = rnd.random((s, s)) + 1.j * rnd.random((s, s))
+            B = 10 * np.eye(s) + B.dot(B.T.conj())
+            B = B.astype(np.complex128) if db else B.astype(np.complex64)
+            w, v = lobpcg(H, X, B, maxiter=99, verbosityLevel=0)
+            w0, _ = eigh(H, B)
+
+        for wx, vx in zip(w, v.T):
+            # Check eigenvector
+            assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx)
+                            / np.linalg.norm(H.dot(vx)),
+                            0, atol=5e-2, rtol=0)
+
+            # Compare eigenvalues
+            j = np.argmin(abs(w0 - wx))
+            assert_allclose(wx, w0[j], rtol=1e-4)
+
+
+# The n=5 case tests the alternative small matrix code path that uses eigh().
+@pytest.mark.filterwarnings("ignore:The problem size")
+@pytest.mark.parametrize('n, atol', [(20, 1e-3), (5, 1e-8)])
+def test_eigs_consistency(n, atol):
+    """Check eigs vs. lobpcg consistency.
+    """
+    vals = np.arange(1, n+1, dtype=np.float64)
+    A = spdiags(vals, 0, n, n)
+    rnd = np.random.RandomState(0)
+    X = rnd.standard_normal((n, 2))
+    lvals, lvecs = lobpcg(A, X, largest=True, maxiter=100)
+    vals, _ = eigs(A, k=2)
+
+    _check_eigen(A, lvals, lvecs, atol=atol, rtol=0)
+    assert_allclose(np.sort(vals), np.sort(lvals), atol=1e-14)
+
+
+def test_verbosity():
+    """Check that nonzero verbosity level code runs.
+    """
+    rnd = np.random.RandomState(0)
+    X = rnd.standard_normal((10, 10))
+    A = X @ X.T
+    Q = rnd.standard_normal((X.shape[0], 1))
+    msg = "Exited at iteration.*|Exited postprocessing with accuracies.*"
+    with pytest.warns(UserWarning, match=msg):
+        _, _ = lobpcg(A, Q, maxiter=3, verbosityLevel=9)
+
+
+@pytest.mark.xfail(_IS_32BIT and sys.platform == 'win32',
+                   reason="tolerance violation on windows")
+@pytest.mark.xfail(platform.machine() == 'ppc64le',
+                   reason="fails on ppc64le")
+@pytest.mark.filterwarnings("ignore:Exited postprocessing")
+def test_tolerance_float32():
+    """Check lobpcg for attainable tolerance in float32.
+ """ + rnd = np.random.RandomState(0) + n = 50 + m = 3 + vals = -np.arange(1, n + 1) + A = diags([vals], [0], (n, n)) + A = A.astype(np.float32) + X = rnd.standard_normal((n, m)) + X = X.astype(np.float32) + eigvals, _ = lobpcg(A, X, tol=1.25e-5, maxiter=50, verbosityLevel=0) + assert_allclose(eigvals, -np.arange(1, 1 + m), atol=2e-5, rtol=1e-5) + + +@pytest.mark.parametrize("vdtype", VDTYPES) +@pytest.mark.parametrize("mdtype", MDTYPES) +@pytest.mark.parametrize("arr_type", [np.array, + sparse.csr_matrix, + sparse.coo_matrix]) +def test_dtypes(vdtype, mdtype, arr_type): + """Test lobpcg in various dtypes. + """ + rnd = np.random.RandomState(0) + n = 12 + m = 2 + A = arr_type(np.diag(np.arange(1, n + 1)).astype(mdtype)) + X = rnd.random((n, m)) + X = X.astype(vdtype) + eigvals, eigvecs = lobpcg(A, X, tol=1e-2, largest=False) + assert_allclose(eigvals, np.arange(1, 1 + m), atol=1e-1) + # eigenvectors must be nearly real in any case + assert_allclose(np.sum(np.abs(eigvecs - eigvecs.conj())), 0, atol=1e-2) + + +@pytest.mark.filterwarnings("ignore:Exited at iteration") +@pytest.mark.filterwarnings("ignore:Exited postprocessing") +def test_inplace_warning(): + """Check lobpcg gives a warning in '_b_orthonormalize' + that in-place orthogonalization is impossible due to dtype mismatch. + """ + rnd = np.random.RandomState(0) + n = 6 + m = 1 + vals = -np.arange(1, n + 1) + A = diags([vals], [0], (n, n)) + A = A.astype(np.cdouble) + X = rnd.standard_normal((n, m)) + with pytest.warns(UserWarning, match="Inplace update"): + eigvals, _ = lobpcg(A, X, maxiter=2, verbosityLevel=1) + + +def test_maxit(): + """Check lobpcg if maxit=maxiter runs maxiter iterations and + if maxit=None runs 20 iterations (the default) + by checking the size of the iteration history output, which should + be the number of iterations plus 3 (initial, final, and postprocessing) + typically when maxiter is small and the choice of the best is passive. + """ + rnd = np.random.RandomState(0) + n = 50 + m = 4 + vals = -np.arange(1, n + 1) + A = diags([vals], [0], (n, n)) + A = A.astype(np.float32) + X = rnd.standard_normal((n, m)) + X = X.astype(np.float64) + msg = "Exited at iteration.*|Exited postprocessing with accuracies.*" + for maxiter in range(1, 4): + with pytest.warns(UserWarning, match=msg): + _, _, l_h, r_h = lobpcg(A, X, tol=1e-8, maxiter=maxiter, + retLambdaHistory=True, + retResidualNormsHistory=True) + assert_allclose(np.shape(l_h)[0], maxiter+3) + assert_allclose(np.shape(r_h)[0], maxiter+3) + with pytest.warns(UserWarning, match=msg): + l, _, l_h, r_h = lobpcg(A, X, tol=1e-8, + retLambdaHistory=True, + retResidualNormsHistory=True) + assert_allclose(np.shape(l_h)[0], 20+3) + assert_allclose(np.shape(r_h)[0], 20+3) + # Check that eigenvalue output is the last one in history + assert_allclose(l, l_h[-1]) + # Make sure that both history outputs are lists + assert isinstance(l_h, list) + assert isinstance(r_h, list) + # Make sure that both history lists are arrays-like + assert_allclose(np.shape(l_h), np.shape(np.asarray(l_h))) + assert_allclose(np.shape(r_h), np.shape(np.asarray(r_h))) + + +@pytest.mark.slow +@pytest.mark.parametrize("n", [15]) +@pytest.mark.parametrize("m", [1, 2]) +@pytest.mark.filterwarnings("ignore:Exited at iteration") +@pytest.mark.filterwarnings("ignore:Exited postprocessing") +def test_diagonal_data_types(n, m): + """Check lobpcg for diagonal matrices for all matrix types. + Constraints are imposed, so a dense eigensolver eig cannot run. 
+ """ + rnd = np.random.RandomState(0) + # Define the generalized eigenvalue problem Av = cBv + # where (c, v) is a generalized eigenpair, + # and where we choose A and B to be diagonal. + vals = np.arange(1, n + 1) + + # list_sparse_format = ['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'] + list_sparse_format = ['coo'] + sparse_formats = len(list_sparse_format) + for s_f_i, s_f in enumerate(list_sparse_format): + + As64 = diags([vals * vals], [0], (n, n), format=s_f) + As32 = As64.astype(np.float32) + Af64 = As64.toarray() + Af32 = Af64.astype(np.float32) + + def As32f(x): + return As32 @ x + As32LO = LinearOperator(matvec=As32f, + matmat=As32f, + shape=(n, n), + dtype=As32.dtype) + + listA = [Af64, As64, Af32, As32, As32f, As32LO, lambda v: As32 @ v] + + Bs64 = diags([vals], [0], (n, n), format=s_f) + Bf64 = Bs64.toarray() + Bs32 = Bs64.astype(np.float32) + + def Bs32f(x): + return Bs32 @ x + Bs32LO = LinearOperator(matvec=Bs32f, + matmat=Bs32f, + shape=(n, n), + dtype=Bs32.dtype) + listB = [Bf64, Bs64, Bs32, Bs32f, Bs32LO, lambda v: Bs32 @ v] + + # Define the preconditioner function as LinearOperator. + Ms64 = diags([1./vals], [0], (n, n), format=s_f) + + def Ms64precond(x): + return Ms64 @ x + Ms64precondLO = LinearOperator(matvec=Ms64precond, + matmat=Ms64precond, + shape=(n, n), + dtype=Ms64.dtype) + Mf64 = Ms64.toarray() + + def Mf64precond(x): + return Mf64 @ x + Mf64precondLO = LinearOperator(matvec=Mf64precond, + matmat=Mf64precond, + shape=(n, n), + dtype=Mf64.dtype) + Ms32 = Ms64.astype(np.float32) + + def Ms32precond(x): + return Ms32 @ x + Ms32precondLO = LinearOperator(matvec=Ms32precond, + matmat=Ms32precond, + shape=(n, n), + dtype=Ms32.dtype) + Mf32 = Ms32.toarray() + + def Mf32precond(x): + return Mf32 @ x + Mf32precondLO = LinearOperator(matvec=Mf32precond, + matmat=Mf32precond, + shape=(n, n), + dtype=Mf32.dtype) + listM = [None, Ms64, Ms64precondLO, Mf64precondLO, Ms64precond, + Ms32, Ms32precondLO, Mf32precondLO, Ms32precond] + + # Setup matrix of the initial approximation to the eigenvectors + # (cannot be sparse array). + Xf64 = rnd.random((n, m)) + Xf32 = Xf64.astype(np.float32) + listX = [Xf64, Xf32] + + # Require that the returned eigenvectors be in the orthogonal complement + # of the first few standard basis vectors (cannot be sparse array). + m_excluded = 3 + Yf64 = np.eye(n, m_excluded, dtype=float) + Yf32 = np.eye(n, m_excluded, dtype=np.float32) + listY = [Yf64, Yf32] + + tests = list(itertools.product(listA, listB, listM, listX, listY)) + # This is one of the slower tests because there are >1,000 configs + # to test here, instead of checking product of all input, output types + # test each configuration for the first sparse format, and then + # for one additional sparse format. this takes 2/7=30% as long as + # testing all configurations for all sparse formats. 
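+        # Strided slicing: for every sparse format after the first, keep
+        # each (sparse_formats - 1)-th configuration, offset by the format
+        # index, so later formats still sample a spread of combinations.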
+ if s_f_i > 0: + tests = tests[s_f_i - 1::sparse_formats-1] + + for A, B, M, X, Y in tests: + eigvals, _ = lobpcg(A, X, B=B, M=M, Y=Y, tol=1e-4, + maxiter=100, largest=False) + assert_allclose(eigvals, + np.arange(1 + m_excluded, 1 + m_excluded + m), + atol=1e-5) diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__init__.py b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97c02324b9e323693121a1b226cfeb1e1b0c4f58 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__pycache__/test_svds.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__pycache__/test_svds.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a905ef1375ece0b7efbaa9fd4cbb224f130e4f6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__pycache__/test_svds.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/test_svds.py b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/test_svds.py new file mode 100644 index 0000000000000000000000000000000000000000..7d467b7684b47504702f2f59334c944be2fbe0d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/test_svds.py @@ -0,0 +1,862 @@ +import re +import copy +import numpy as np + +from numpy.testing import assert_allclose, assert_equal, assert_array_equal +import pytest + +from scipy.linalg import svd, null_space +from scipy.sparse import csc_matrix, issparse, spdiags, random +from scipy.sparse.linalg import LinearOperator, aslinearoperator +from scipy.sparse.linalg import svds +from scipy.sparse.linalg._eigen.arpack import ArpackNoConvergence + + +# --- Helper Functions / Classes --- + + +def sorted_svd(m, k, which='LM'): + # Compute svd of a dense matrix m, and return singular vectors/values + # sorted. + if issparse(m): + m = m.toarray() + u, s, vh = svd(m) + if which == 'LM': + ii = np.argsort(s)[-k:] + elif which == 'SM': + ii = np.argsort(s)[:k] + else: + raise ValueError(f"unknown which={which!r}") + + return u[:, ii], s[ii], vh[ii] + + +def _check_svds(A, k, u, s, vh, which="LM", check_usvh_A=False, + check_svd=True, atol=1e-10, rtol=1e-7): + n, m = A.shape + + # Check shapes. + assert_equal(u.shape, (n, k)) + assert_equal(s.shape, (k,)) + assert_equal(vh.shape, (k, m)) + + # Check that the original matrix can be reconstituted. + A_rebuilt = (u*s).dot(vh) + assert_equal(A_rebuilt.shape, A.shape) + if check_usvh_A: + assert_allclose(A_rebuilt, A, atol=atol, rtol=rtol) + + # Check that u is a semi-orthogonal matrix. + uh_u = np.dot(u.T.conj(), u) + assert_equal(uh_u.shape, (k, k)) + assert_allclose(uh_u, np.identity(k), atol=atol, rtol=rtol) + + # Check that vh is a semi-orthogonal matrix. 
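+    # Semi-orthogonal here means vh has orthonormal rows, so vh @ vh^H
+    # equals the k-by-k identity.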
+ vh_v = np.dot(vh, vh.T.conj()) + assert_equal(vh_v.shape, (k, k)) + assert_allclose(vh_v, np.identity(k), atol=atol, rtol=rtol) + + # Check that scipy.sparse.linalg.svds ~ scipy.linalg.svd + if check_svd: + u2, s2, vh2 = sorted_svd(A, k, which) + assert_allclose(np.abs(u), np.abs(u2), atol=atol, rtol=rtol) + assert_allclose(s, s2, atol=atol, rtol=rtol) + assert_allclose(np.abs(vh), np.abs(vh2), atol=atol, rtol=rtol) + + +def _check_svds_n(A, k, u, s, vh, which="LM", check_res=True, + check_svd=True, atol=1e-10, rtol=1e-7): + n, m = A.shape + + # Check shapes. + assert_equal(u.shape, (n, k)) + assert_equal(s.shape, (k,)) + assert_equal(vh.shape, (k, m)) + + # Check that u is a semi-orthogonal matrix. + uh_u = np.dot(u.T.conj(), u) + assert_equal(uh_u.shape, (k, k)) + error = np.sum(np.abs(uh_u - np.identity(k))) / (k * k) + assert_allclose(error, 0.0, atol=atol, rtol=rtol) + + # Check that vh is a semi-orthogonal matrix. + vh_v = np.dot(vh, vh.T.conj()) + assert_equal(vh_v.shape, (k, k)) + error = np.sum(np.abs(vh_v - np.identity(k))) / (k * k) + assert_allclose(error, 0.0, atol=atol, rtol=rtol) + + # Check residuals + if check_res: + ru = A.T.conj() @ u - vh.T.conj() * s + rus = np.sum(np.abs(ru)) / (n * k) + rvh = A @ vh.T.conj() - u * s + rvhs = np.sum(np.abs(rvh)) / (m * k) + assert_allclose(rus, 0.0, atol=atol, rtol=rtol) + assert_allclose(rvhs, 0.0, atol=atol, rtol=rtol) + + # Check that scipy.sparse.linalg.svds ~ scipy.linalg.svd + if check_svd: + u2, s2, vh2 = sorted_svd(A, k, which) + assert_allclose(s, s2, atol=atol, rtol=rtol) + A_rebuilt_svd = (u2*s2).dot(vh2) + A_rebuilt = (u*s).dot(vh) + assert_equal(A_rebuilt.shape, A.shape) + error = np.sum(np.abs(A_rebuilt_svd - A_rebuilt)) / (k * k) + assert_allclose(error, 0.0, atol=atol, rtol=rtol) + + +class CheckingLinearOperator(LinearOperator): + def __init__(self, A): + self.A = A + self.dtype = A.dtype + self.shape = A.shape + + def _matvec(self, x): + assert_equal(max(x.shape), np.size(x)) + return self.A.dot(x) + + def _rmatvec(self, x): + assert_equal(max(x.shape), np.size(x)) + return self.A.T.conjugate().dot(x) + + +# --- Test Input Validation --- +# Tests input validation on parameters `k` and `which`. +# Needs better input validation checks for all other parameters. + +class SVDSCommonTests: + + solver = None + + # some of these IV tests could run only once, say with solver=None + + _A_empty_msg = "`A` must not be empty." 
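# --- Editor's aside (illustrative sketch, not part of the upstream file) ---
# _check_svds_n above scores residuals instead of comparing against a dense
# SVD: an exact singular triplet satisfies A v_i = s_i u_i and
# A^H u_i = s_i v_i, so both residual blocks below vanish for an exact
# decomposition.
import numpy as np
from scipy.sparse.linalg import svds

rng = np.random.default_rng(0)
A = rng.random((8, 5))
u, s, vh = svds(A, k=2)
assert np.allclose(A @ vh.conj().T, u * s, atol=1e-10)           # A v = s u
assert np.allclose(A.conj().T @ u, vh.conj().T * s, atol=1e-10)  # A^H u = s v
# --- end aside ---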
+ _A_dtype_msg = "`A` must be of floating or complex floating data type" + _A_type_msg = "type not understood" + _A_ndim_msg = "array must have ndim <= 2" + _A_validation_inputs = [ + (np.asarray([[]]), ValueError, _A_empty_msg), + (np.asarray([[1, 2], [3, 4]]), ValueError, _A_dtype_msg), + ("hi", TypeError, _A_type_msg), + (np.asarray([[[1., 2.], [3., 4.]]]), ValueError, _A_ndim_msg)] + + @pytest.mark.parametrize("args", _A_validation_inputs) + def test_svds_input_validation_A(self, args): + A, error_type, message = args + with pytest.raises(error_type, match=message): + svds(A, k=1, solver=self.solver) + + @pytest.mark.parametrize("k", [-1, 0, 3, 4, 5, 1.5, "1"]) + def test_svds_input_validation_k_1(self, k): + rng = np.random.default_rng(0) + A = rng.random((4, 3)) + + # propack can do complete SVD + if self.solver == 'propack' and k == 3: + res = svds(A, k=k, solver=self.solver, random_state=0) + _check_svds(A, k, *res, check_usvh_A=True, check_svd=True) + return + + message = ("`k` must be an integer satisfying") + with pytest.raises(ValueError, match=message): + svds(A, k=k, solver=self.solver) + + def test_svds_input_validation_k_2(self): + # I think the stack trace is reasonable when `k` can't be converted + # to an int. + message = "int() argument must be a" + with pytest.raises(TypeError, match=re.escape(message)): + svds(np.eye(10), k=[], solver=self.solver) + + message = "invalid literal for int()" + with pytest.raises(ValueError, match=message): + svds(np.eye(10), k="hi", solver=self.solver) + + @pytest.mark.parametrize("tol", (-1, np.inf, np.nan)) + def test_svds_input_validation_tol_1(self, tol): + message = "`tol` must be a non-negative floating point value." + with pytest.raises(ValueError, match=message): + svds(np.eye(10), tol=tol, solver=self.solver) + + @pytest.mark.parametrize("tol", ([], 'hi')) + def test_svds_input_validation_tol_2(self, tol): + # I think the stack trace is reasonable here + message = "'<' not supported between instances" + with pytest.raises(TypeError, match=message): + svds(np.eye(10), tol=tol, solver=self.solver) + + @pytest.mark.parametrize("which", ('LA', 'SA', 'ekki', 0)) + def test_svds_input_validation_which(self, which): + # Regression test for a github issue. + # https://github.com/scipy/scipy/issues/4590 + # Function was not checking for eigenvalue type and unintended + # values could be returned. + with pytest.raises(ValueError, match="`which` must be in"): + svds(np.eye(10), which=which, solver=self.solver) + + @pytest.mark.parametrize("transpose", (True, False)) + @pytest.mark.parametrize("n", range(4, 9)) + def test_svds_input_validation_v0_1(self, transpose, n): + rng = np.random.default_rng(0) + A = rng.random((5, 7)) + v0 = rng.random(n) + if transpose: + A = A.T + k = 2 + message = "`v0` must have shape" + + required_length = (A.shape[0] if self.solver == 'propack' + else min(A.shape)) + if n != required_length: + with pytest.raises(ValueError, match=message): + svds(A, k=k, v0=v0, solver=self.solver) + + def test_svds_input_validation_v0_2(self): + A = np.ones((10, 10)) + v0 = np.ones((1, 10)) + message = "`v0` must have shape" + with pytest.raises(ValueError, match=message): + svds(A, k=1, v0=v0, solver=self.solver) + + @pytest.mark.parametrize("v0", ("hi", 1, np.ones(10, dtype=int))) + def test_svds_input_validation_v0_3(self, v0): + A = np.ones((10, 10)) + message = "`v0` must be of floating or complex floating data type." 
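# --- Editor's aside (illustrative sketch, not part of the upstream file) ---
# The validation behaviour pinned down above, exercised directly: integer
# input is rejected because `A` must be floating point, and `k` must satisfy
# 0 < k < min(A.shape) for the default (ARPACK) solver.
import numpy as np
from scipy.sparse.linalg import svds

for bad_call in (lambda: svds(np.array([[1, 2], [3, 4]]), k=1),  # int dtype
                 lambda: svds(np.eye(4, 3), k=3)):               # k too large
    try:
        bad_call()
    except ValueError as exc:
        print(exc)
# --- end aside ---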
+ with pytest.raises(ValueError, match=message): + svds(A, k=1, v0=v0, solver=self.solver) + + @pytest.mark.parametrize("maxiter", (-1, 0, 5.5)) + def test_svds_input_validation_maxiter_1(self, maxiter): + message = ("`maxiter` must be a positive integer.") + with pytest.raises(ValueError, match=message): + svds(np.eye(10), maxiter=maxiter, solver=self.solver) + + def test_svds_input_validation_maxiter_2(self): + # I think the stack trace is reasonable when `k` can't be converted + # to an int. + message = "int() argument must be a" + with pytest.raises(TypeError, match=re.escape(message)): + svds(np.eye(10), maxiter=[], solver=self.solver) + + message = "invalid literal for int()" + with pytest.raises(ValueError, match=message): + svds(np.eye(10), maxiter="hi", solver=self.solver) + + @pytest.mark.parametrize("rsv", ('ekki', 10)) + def test_svds_input_validation_return_singular_vectors(self, rsv): + message = "`return_singular_vectors` must be in" + with pytest.raises(ValueError, match=message): + svds(np.eye(10), return_singular_vectors=rsv, solver=self.solver) + + # --- Test Parameters --- + + @pytest.mark.parametrize("k", [3, 5]) + @pytest.mark.parametrize("which", ["LM", "SM"]) + def test_svds_parameter_k_which(self, k, which): + # check that the `k` parameter sets the number of eigenvalues/ + # eigenvectors returned. + # Also check that the `which` parameter sets whether the largest or + # smallest eigenvalues are returned + rng = np.random.default_rng(0) + A = rng.random((10, 10)) + if self.solver == 'lobpcg': + with pytest.warns(UserWarning, match="The problem size"): + res = svds(A, k=k, which=which, solver=self.solver, + random_state=0) + else: + res = svds(A, k=k, which=which, solver=self.solver, + random_state=0) + _check_svds(A, k, *res, which=which, atol=8e-10) + + @pytest.mark.filterwarnings("ignore:Exited", + reason="Ignore LOBPCG early exit.") + # loop instead of parametrize for simplicity + def test_svds_parameter_tol(self): + # check the effect of the `tol` parameter on solver accuracy by solving + # the same problem with varying `tol` and comparing the eigenvalues + # against ground truth computed + n = 100 # matrix size + k = 3 # number of eigenvalues to check + + # generate a random, sparse-ish matrix + # effect isn't apparent for matrices that are too small + rng = np.random.default_rng(0) + A = rng.random((n, n)) + A[A > .1] = 0 + A = A @ A.T + + _, s, _ = svd(A) # calculate ground truth + + # calculate the error as a function of `tol` + A = csc_matrix(A) + + def err(tol): + _, s2, _ = svds(A, k=k, v0=np.ones(n), maxiter=1000, + solver=self.solver, tol=tol, random_state=0) + return np.linalg.norm((s2 - s[k-1::-1])/s[k-1::-1]) + + tols = [1e-4, 1e-2, 1e0] # tolerance levels to check + # for 'arpack' and 'propack', accuracies make discrete steps + accuracies = {'propack': [1e-12, 1e-6, 1e-4], + 'arpack': [2.5e-15, 1e-10, 1e-10], + 'lobpcg': [2e-12, 4e-2, 2]} + + for tol, accuracy in zip(tols, accuracies[self.solver]): + error = err(tol) + assert error < accuracy + + def test_svd_v0(self): + # check that the `v0` parameter affects the solution + n = 100 + k = 1 + # If k != 1, LOBPCG needs more initial vectors, which are generated + # with random_state, so it does not pass w/ k >= 2. + # For some other values of `n`, the AssertionErrors are not raised + # with different v0s, which is reasonable. 
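# --- Editor's aside (illustrative sketch, not part of the upstream file) ---
# The tol-sweep pattern used in test_svds_parameter_tol, standalone: tighten
# `tol` and compare the top-k singular values against a dense ground truth.
# Per the `accuracies` table above, the effect is most visible for LOBPCG;
# ARPACK is already tight at loose tolerances.
import numpy as np
from scipy.linalg import svd
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import svds

rng = np.random.default_rng(0)
A = rng.random((100, 100))
A[A > .1] = 0                        # sparse-ish, as in the test above
A = csc_matrix(A @ A.T)
_, s_true, _ = svd(A.toarray())      # dense ground truth, descending order
for tol in (1e0, 1e-2, 1e-4):
    _, s, _ = svds(A, k=3, tol=tol, v0=np.ones(100), maxiter=1000)
    err = np.linalg.norm((s - s_true[2::-1]) / s_true[2::-1])
    print(f"tol={tol:g}  relative error of top 3: {err:.2e}")
# --- end aside ---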
+
+        rng = np.random.default_rng(0)
+        A = rng.random((n, n))
+
+        # with the same v0, solutions are the same, and they are accurate
+        # v0 takes precedence over random_state
+        v0a = rng.random(n)
+        res1a = svds(A, k, v0=v0a, solver=self.solver, random_state=0)
+        res2a = svds(A, k, v0=v0a, solver=self.solver, random_state=1)
+        for idx in range(3):
+            assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
+        _check_svds(A, k, *res1a)
+
+        # with the same v0, solutions are the same, and they are accurate
+        v0b = rng.random(n)
+        res1b = svds(A, k, v0=v0b, solver=self.solver, random_state=2)
+        res2b = svds(A, k, v0=v0b, solver=self.solver, random_state=3)
+        for idx in range(3):
+            assert_allclose(res1b[idx], res2b[idx], rtol=1e-15, atol=2e-16)
+        _check_svds(A, k, *res1b)
+
+        # with different v0, solutions can be numerically different
+        message = "Arrays are not equal"
+        with pytest.raises(AssertionError, match=message):
+            assert_equal(res1a, res1b)
+
+    def test_svd_random_state(self):
+        # check that the `random_state` parameter affects the solution
+        # Admittedly, `n` and `k` are chosen so that all solvers pass all
+        # these checks. That's a tall order, since LOBPCG struggles to
+        # achieve the desired accuracy and ARPACK often returns the same
+        # singular values/vectors for different v0.
+        n = 100
+        k = 1
+
+        rng = np.random.default_rng(0)
+        A = rng.random((n, n))
+
+        # with the same random_state, solutions are the same and accurate
+        res1a = svds(A, k, solver=self.solver, random_state=0)
+        res2a = svds(A, k, solver=self.solver, random_state=0)
+        for idx in range(3):
+            assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
+        _check_svds(A, k, *res1a)
+
+        # with the same random_state, solutions are the same and accurate
+        res1b = svds(A, k, solver=self.solver, random_state=1)
+        res2b = svds(A, k, solver=self.solver, random_state=1)
+        for idx in range(3):
+            assert_allclose(res1b[idx], res2b[idx], rtol=1e-15, atol=2e-16)
+        _check_svds(A, k, *res1b)
+
+        # with different random_state, solutions can be numerically different
+        message = "Arrays are not equal"
+        with pytest.raises(AssertionError, match=message):
+            assert_equal(res1a, res1b)
+
+    @pytest.mark.parametrize("random_state", (0, 1,
+                                              np.random.RandomState(0),
+                                              np.random.default_rng(0)))
+    def test_svd_random_state_2(self, random_state):
+        n = 100
+        k = 1
+
+        rng = np.random.default_rng(0)
+        A = rng.random((n, n))
+
+        random_state_2 = copy.deepcopy(random_state)
+
+        # with the same random_state, solutions are the same and accurate
+        res1a = svds(A, k, solver=self.solver, random_state=random_state)
+        res2a = svds(A, k, solver=self.solver, random_state=random_state_2)
+        for idx in range(3):
+            assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
+        _check_svds(A, k, *res1a)
+
+    @pytest.mark.parametrize("random_state", (None,
+                                              np.random.RandomState(0),
+                                              np.random.default_rng(0)))
+    @pytest.mark.filterwarnings("ignore:Exited",
+                                reason="Ignore LOBPCG early exit.")
+    def test_svd_random_state_3(self, random_state):
+        n = 100
+        k = 5
+
+        rng = np.random.default_rng(0)
+        A = rng.random((n, n))
+
+        random_state = copy.deepcopy(random_state)
+
+        # random_state in a different state produces accurate - but not
+        # necessarily identical - results
+        res1a = svds(A, k, solver=self.solver, random_state=random_state, maxiter=1000)
+        res2a = svds(A, k, solver=self.solver, random_state=random_state, maxiter=1000)
+        _check_svds(A, k, *res1a, atol=2e-7)
+        _check_svds(A, k, *res2a, atol=2e-7)
+
+        message = "Arrays are not equal"
+        with pytest.raises(AssertionError, match=message):
+            assert_equal(res1a, res2a)
+
+    @pytest.mark.filterwarnings("ignore:Exited postprocessing")
+    def test_svd_maxiter(self):
+        # check that maxiter works as expected: should not return accurate
+        # solution after 1 iteration, but should with default `maxiter`
+        A = np.diag(np.arange(9)).astype(np.float64)
+        k = 1
+        u, s, vh = sorted_svd(A, k)
+        # use the solver's default maxiter unless a branch below overrides it
+        maxiter = None
+
+        if self.solver == 'arpack':
+            message = "ARPACK error -1: No convergence"
+            with pytest.raises(ArpackNoConvergence, match=message):
+                svds(A, k, ncv=3, maxiter=1, solver=self.solver)
+        elif self.solver == 'lobpcg':
+            # Set maxiter higher so test passes without changing
+            # default and breaking backward compatibility (gh-20221)
+            maxiter = 30
+            with pytest.warns(UserWarning, match="Exited at iteration"):
+                svds(A, k, maxiter=1, solver=self.solver)
+        elif self.solver == 'propack':
+            message = "k=1 singular triplets did not converge within"
+            with pytest.raises(np.linalg.LinAlgError, match=message):
+                svds(A, k, maxiter=1, solver=self.solver)
+
+        ud, sd, vhd = svds(A, k, solver=self.solver, maxiter=maxiter,
+                           random_state=0)
+        _check_svds(A, k, ud, sd, vhd, atol=1e-8)
+        assert_allclose(np.abs(ud), np.abs(u), atol=1e-8)
+        assert_allclose(np.abs(vhd), np.abs(vh), atol=1e-8)
+        assert_allclose(np.abs(sd), np.abs(s), atol=1e-9)
+
+    @pytest.mark.parametrize("rsv", (True, False, 'u', 'vh'))
+    @pytest.mark.parametrize("shape", ((5, 7), (6, 6), (7, 5)))
+    def test_svd_return_singular_vectors(self, rsv, shape):
+        # check that the return_singular_vectors parameter works as expected
+        rng = np.random.default_rng(0)
+        A = rng.random(shape)
+        k = 2
+        M, N = shape
+        u, s, vh = sorted_svd(A, k)
+
+        respect_u = True if self.solver == 'propack' else M <= N
+        respect_vh = True if self.solver == 'propack' else M > N
+
+        if self.solver == 'lobpcg':
+            with pytest.warns(UserWarning, match="The problem size"):
+                if rsv is False:
+                    s2 = svds(A, k, return_singular_vectors=rsv,
+                              solver=self.solver, random_state=rng)
+                    assert_allclose(s2, s)
+                elif rsv == 'u' and respect_u:
+                    u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
+                                       solver=self.solver, random_state=rng)
+                    assert_allclose(np.abs(u2), np.abs(u))
+                    assert_allclose(s2, s)
+                    assert vh2 is None
+                elif rsv == 'vh' and respect_vh:
+                    u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
+                                       solver=self.solver, random_state=rng)
+                    assert u2 is None
+                    assert_allclose(s2, s)
+                    assert_allclose(np.abs(vh2), np.abs(vh))
+                else:
+                    u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
+                                       solver=self.solver, random_state=rng)
+                    if u2 is not None:
+                        assert_allclose(np.abs(u2), np.abs(u))
+                    assert_allclose(s2, s)
+                    if vh2 is not None:
+                        assert_allclose(np.abs(vh2), np.abs(vh))
+        else:
+            if rsv is False:
+                s2 = svds(A, k, return_singular_vectors=rsv,
+                          solver=self.solver, random_state=rng)
+                assert_allclose(s2, s)
+            elif rsv == 'u' and respect_u:
+                u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
+                                   solver=self.solver, random_state=rng)
+                assert_allclose(np.abs(u2), np.abs(u))
+                assert_allclose(s2, s)
+                assert vh2 is None
+            elif rsv == 'vh' and respect_vh:
+                u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
+                                   solver=self.solver, random_state=rng)
+                assert u2 is None
+                assert_allclose(s2, s)
+                assert_allclose(np.abs(vh2), np.abs(vh))
+            else:
+                u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
+                                   solver=self.solver, random_state=rng)
+                if u2 is not None:
+                    assert_allclose(np.abs(u2), np.abs(u))
+                assert_allclose(s2, s)
+                if vh2 is not None:
+                    assert_allclose(np.abs(vh2), np.abs(vh))
+
+    # --- Test Basic Functionality ---
+    # Tests the accuracy of each solver for real and complex matrices
+    # provided as list, dense array, sparse matrix, and LinearOperator.
+
+    A1 = [[1, 2, 3], [3, 4, 3], [1 + 1j, 0, 2], [0, 0, 1]]
+    A2 = [[1, 2, 3, 8 + 5j], [3 - 2j, 4, 3, 5], [1, 0, 2, 3], [0, 0, 1, 0]]
+
+    @pytest.mark.filterwarnings("ignore:k >= N - 1",
+                                reason="needed to demonstrate #16725")
+    @pytest.mark.parametrize('A', (A1, A2))
+    @pytest.mark.parametrize('k', range(1, 5))
+    # PROPACK fails a lot if @pytest.mark.parametrize('which', ("SM", "LM"))
+    @pytest.mark.parametrize('real', (True, False))
+    @pytest.mark.parametrize('transpose', (False, True))
+    # In gh-14299, it was suggested that `svds` should _not_ work with lists
+    @pytest.mark.parametrize('lo_type', (np.asarray, csc_matrix,
+                                         aslinearoperator))
+    def test_svd_simple(self, A, k, real, transpose, lo_type):
+
+        A = np.asarray(A)
+        A = np.real(A) if real else A
+        A = A.T if transpose else A
+        A2 = lo_type(A)
+
+        # could check for the appropriate errors, but that is tested above
+        if k > min(A.shape):
+            pytest.skip("`k` cannot be greater than `min(A.shape)`")
+        if self.solver != 'propack' and k >= min(A.shape):
+            pytest.skip("Only PROPACK supports complete SVD")
+        if self.solver == 'arpack' and not real and k == min(A.shape) - 1:
+            pytest.skip("#16725")
+
+        atol = 3e-10
+        if self.solver == 'propack':
+            atol = 3e-9  # otherwise test fails on Linux aarch64 (see gh-19855)
+
+        if self.solver == 'lobpcg':
+            with pytest.warns(UserWarning, match="The problem size"):
+                u, s, vh = svds(A2, k, solver=self.solver, random_state=0)
+        else:
+            u, s, vh = svds(A2, k, solver=self.solver, random_state=0)
+        _check_svds(A, k, u, s, vh, atol=atol)
+
+    def test_svd_linop(self):
+        solver = self.solver
+
+        nmks = [(6, 7, 3),
+                (9, 5, 4),
+                (10, 8, 5)]
+
+        def reorder(args):
+            U, s, VH = args
+            j = np.argsort(s)
+            return U[:, j], s[j], VH[j, :]
+
+        for n, m, k in nmks:
+            # Test svds on a LinearOperator.
+            A = np.random.RandomState(52).randn(n, m)
+            L = CheckingLinearOperator(A)
+
+            if solver == 'propack':
+                v0 = np.ones(n)
+            else:
+                v0 = np.ones(min(A.shape))
+            if solver == 'lobpcg':
+                with pytest.warns(UserWarning, match="The problem size"):
+                    U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver,
+                                               random_state=0))
+                    U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver,
+                                               random_state=0))
+            else:
+                U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver,
+                                           random_state=0))
+                U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver,
+                                           random_state=0))
+
+            assert_allclose(np.abs(U1), np.abs(U2))
+            assert_allclose(s1, s2)
+            assert_allclose(np.abs(VH1), np.abs(VH2))
+            assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
+                            np.dot(U2, np.dot(np.diag(s2), VH2)))
+
+            # Try again with which="SM".
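# --- Editor's aside (illustrative sketch, not part of the upstream file) ---
# The matrix-free property test_svd_linop checks, in miniature: svds only
# needs matvec/rmatvec, so a LinearOperator wrapper must reproduce the dense
# result up to the sign/phase ambiguity of the singular vectors.
import numpy as np
from scipy.sparse.linalg import aslinearoperator, svds

A = np.random.RandomState(52).randn(6, 7)
u1, s1, vh1 = svds(A, k=3, v0=np.ones(6), random_state=0)
u2, s2, vh2 = svds(aslinearoperator(A), k=3, v0=np.ones(6), random_state=0)
assert np.allclose(s1, s2)
assert np.allclose(np.abs(u1), np.abs(u2))    # vectors agree up to sign
assert np.allclose(np.abs(vh1), np.abs(vh2))
# --- end aside ---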
+ A = np.random.RandomState(1909).randn(n, m) + L = CheckingLinearOperator(A) + + # TODO: arpack crashes when v0=v0, which="SM" + kwargs = {'v0': v0} if solver not in {None, 'arpack'} else {} + if self.solver == 'lobpcg': + with pytest.warns(UserWarning, match="The problem size"): + U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver, + random_state=0, **kwargs)) + U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver, + random_state=0, **kwargs)) + else: + U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver, + random_state=0, **kwargs)) + U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver, + random_state=0, **kwargs)) + + assert_allclose(np.abs(U1), np.abs(U2)) + assert_allclose(s1 + 1, s2 + 1) + assert_allclose(np.abs(VH1), np.abs(VH2)) + assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)), + np.dot(U2, np.dot(np.diag(s2), VH2))) + + if k < min(n, m) - 1: + # Complex input and explicit which="LM". + for (dt, eps) in [(complex, 1e-7), (np.complex64, 3e-3)]: + rng = np.random.RandomState(1648) + A = (rng.randn(n, m) + 1j * rng.randn(n, m)).astype(dt) + L = CheckingLinearOperator(A) + + if self.solver == 'lobpcg': + with pytest.warns(UserWarning, + match="The problem size"): + U1, s1, VH1 = reorder(svds(A, k, which="LM", + solver=solver, + random_state=0)) + U2, s2, VH2 = reorder(svds(L, k, which="LM", + solver=solver, + random_state=0)) + else: + U1, s1, VH1 = reorder(svds(A, k, which="LM", + solver=solver, + random_state=0)) + U2, s2, VH2 = reorder(svds(L, k, which="LM", + solver=solver, + random_state=0)) + + assert_allclose(np.abs(U1), np.abs(U2), rtol=eps) + assert_allclose(s1, s2, rtol=eps) + assert_allclose(np.abs(VH1), np.abs(VH2), rtol=eps) + assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)), + np.dot(U2, np.dot(np.diag(s2), VH2)), + rtol=eps) + + SHAPES = ((100, 100), (100, 101), (101, 100)) + + @pytest.mark.filterwarnings("ignore:Exited at iteration") + @pytest.mark.filterwarnings("ignore:Exited postprocessing") + @pytest.mark.parametrize("shape", SHAPES) + # ARPACK supports only dtype float, complex, or np.float32 + @pytest.mark.parametrize("dtype", (float, complex, np.float32)) + def test_small_sigma_sparse(self, shape, dtype): + # https://github.com/scipy/scipy/pull/11829 + solver = self.solver + # 2do: PROPACK fails orthogonality of singular vectors + # if dtype == complex and self.solver == 'propack': + # pytest.skip("PROPACK unsupported for complex dtype") + rng = np.random.default_rng(0) + k = 5 + (m, n) = shape + S = random(m, n, density=0.1, random_state=rng) + if dtype == complex: + S = + 1j * random(m, n, density=0.1, random_state=rng) + e = np.ones(m) + e[0:5] *= 1e1 ** np.arange(-5, 0, 1) + S = spdiags(e, 0, m, m) @ S + S = S.astype(dtype) + u, s, vh = svds(S, k, which='SM', solver=solver, maxiter=1000, + random_state=0) + c_svd = False # partial SVD can be different from full SVD + _check_svds_n(S, k, u, s, vh, which="SM", check_svd=c_svd, atol=2e-1) + + # --- Test Edge Cases --- + # Checks a few edge cases. + + @pytest.mark.parametrize("shape", ((6, 5), (5, 5), (5, 6))) + @pytest.mark.parametrize("dtype", (float, complex)) + def test_svd_LM_ones_matrix(self, shape, dtype): + # Check that svds can deal with matrix_rank less than k in LM mode. 
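# --- Editor's aside (illustrative sketch, not part of the upstream file) ---
# The rank-one fact this test relies on: the all-ones n x m matrix is the
# outer product of two all-ones vectors, so its only nonzero singular value
# is sqrt(n) * sqrt(m) = sqrt(n * m), and the rest are (numerically) zero.
import numpy as np
from scipy.linalg import svd

n, m = 6, 5
s = svd(np.ones((n, m)), compute_uv=False)
assert np.isclose(s[0], np.sqrt(n * m))   # sqrt(30) here
assert np.allclose(s[1:], 0, atol=1e-12)
# --- end aside ---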
+        k = 3
+        n, m = shape
+        A = np.ones((n, m), dtype=dtype)
+
+        if self.solver == 'lobpcg':
+            with pytest.warns(UserWarning, match="The problem size"):
+                U, s, VH = svds(A, k, solver=self.solver, random_state=0)
+        else:
+            U, s, VH = svds(A, k, solver=self.solver, random_state=0)
+
+        _check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False)
+
+        # Check that the largest singular value is near sqrt(n*m)
+        # and the other singular values have been forced to zero.
+        assert_allclose(np.max(s), np.sqrt(n*m))
+        s = np.array(sorted(s)[:-1]) + 1
+        z = np.ones_like(s)
+        assert_allclose(s, z)
+
+    @pytest.mark.filterwarnings("ignore:k >= N - 1",
+                                reason="needed to demonstrate #16725")
+    @pytest.mark.parametrize("shape", ((3, 4), (4, 4), (4, 3), (4, 2)))
+    @pytest.mark.parametrize("dtype", (float, complex))
+    def test_zero_matrix(self, shape, dtype):
+        # Check that svds can deal with matrices containing only zeros;
+        # see https://github.com/scipy/scipy/issues/3452/
+        # shape = (4, 2) is included because it is the particular case
+        # reported in the issue
+        k = 1
+        n, m = shape
+        A = np.zeros((n, m), dtype=dtype)
+
+        if (self.solver == 'arpack' and dtype is complex
+                and k == min(A.shape) - 1):
+            pytest.skip("#16725")
+
+        if self.solver == 'propack':
+            pytest.skip("PROPACK failures unrelated to PR #16712")
+
+        if self.solver == 'lobpcg':
+            with pytest.warns(UserWarning, match="The problem size"):
+                U, s, VH = svds(A, k, solver=self.solver, random_state=0)
+        else:
+            U, s, VH = svds(A, k, solver=self.solver, random_state=0)
+
+        # Check some generic properties of svd.
+        _check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False)
+
+        # Check that the singular values are zero.
+        assert_array_equal(s, 0)
+
+    @pytest.mark.parametrize("shape", ((20, 20), (20, 21), (21, 20)))
+    # ARPACK supports only dtype float, complex, or np.float32
+    @pytest.mark.parametrize("dtype", (float, complex, np.float32))
+    @pytest.mark.filterwarnings("ignore:Exited",
+                                reason="Ignore LOBPCG early exit.")
+    def test_small_sigma(self, shape, dtype):
+        rng = np.random.default_rng(179847540)
+        A = rng.random(shape).astype(dtype)
+        u, _, vh = svd(A, full_matrices=False)
+        if dtype == np.float32:
+            e = 10.0
+        else:
+            e = 100.0
+        t = e**(-np.arange(len(vh))).astype(dtype)
+        A = (u*t).dot(vh)
+        k = 4
+        u, s, vh = svds(A, k, solver=self.solver, maxiter=100, random_state=0)
+        t = np.sum(s > 0)
+        assert_equal(t, k)
+        # LOBPCG needs larger atol and rtol to pass
+        _check_svds_n(A, k, u, s, vh, atol=1e-3, rtol=1e0, check_svd=False)
+
+    # ARPACK supports only dtype float, complex, or np.float32
+    @pytest.mark.filterwarnings("ignore:The problem size")
+    @pytest.mark.parametrize("dtype", (float, complex, np.float32))
+    def test_small_sigma2(self, dtype):
+        rng = np.random.default_rng(179847540)
+        # create a 10x10 singular matrix with a 4-dim null space
+        dim = 4
+        size = 10
+        x = rng.random((size, size-dim))
+        y = x[:, :dim] * rng.random(dim)
+        mat = np.hstack((x, y))
+        mat = mat.astype(dtype)
+
+        nz = null_space(mat)
+        assert_equal(nz.shape[1], dim)
+
+        # Tolerances atol and rtol adjusted to pass np.float32
+        # Use non-sparse svd
+        u, s, vh = svd(mat)
+        # Singular values are 0:
+        assert_allclose(s[-dim:], 0, atol=1e-6, rtol=1e0)
+        # Smallest right singular vectors in null space:
+        assert_allclose(mat @ vh[-dim:, :].T, 0, atol=1e-6, rtol=1e0)
+
+        # Smallest singular values should be 0
+        sp_mat = csc_matrix(mat)
+        su, ss, svh = svds(sp_mat, k=dim, which='SM', solver=self.solver,
+                           random_state=0)
+        # Smallest dim singular values are 0:
+        assert_allclose(ss, 0, atol=1e-5, rtol=1e0)
+        # Smallest singular vectors via svds in null space:
+        n, m = mat.shape
+        if n < m:  # else the assert fails with some libraries, unclear why
+            assert_allclose(sp_mat.transpose() @ su, 0, atol=1e-5, rtol=1e0)
+        assert_allclose(sp_mat @ svh.T, 0, atol=1e-5, rtol=1e0)
+
+
+# --- Perform tests with each solver ---
+
+
+class Test_SVDS_once:
+    @pytest.mark.parametrize("solver", ['ekki', object])
+    def test_svds_input_validation_solver(self, solver):
+        message = "solver must be one of"
+        with pytest.raises(ValueError, match=message):
+            svds(np.ones((3, 4)), k=2, solver=solver)
+
+
+class Test_SVDS_ARPACK(SVDSCommonTests):
+
+    def setup_method(self):
+        self.solver = 'arpack'
+
+    @pytest.mark.parametrize("ncv", list(range(-1, 8)) + [4.5, "5"])
+    def test_svds_input_validation_ncv_1(self, ncv):
+        rng = np.random.default_rng(0)
+        A = rng.random((6, 7))
+        k = 3
+        if ncv in {4, 5}:
+            u, s, vh = svds(A, k=k, ncv=ncv, solver=self.solver,
+                            random_state=0)
+            # partial decomposition, so don't check that u@diag(s)@vh=A;
+            # do check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
+            _check_svds(A, k, u, s, vh)
+        else:
+            message = "`ncv` must be an integer satisfying"
+            with pytest.raises(ValueError, match=message):
+                svds(A, k=k, ncv=ncv, solver=self.solver)
+
+    def test_svds_input_validation_ncv_2(self):
+        # I think the stack trace is reasonable when `ncv` can't be converted
+        # to an int.
+        message = "int() argument must be a"
+        with pytest.raises(TypeError, match=re.escape(message)):
+            svds(np.eye(10), ncv=[], solver=self.solver)
+
+        message = "invalid literal for int()"
+        with pytest.raises(ValueError, match=message):
+            svds(np.eye(10), ncv="hi", solver=self.solver)
+
+    # I can't see a robust relationship between `ncv` and relevant outputs
+    # (e.g. accuracy, time), so no test of the parameter.
+
+
+class Test_SVDS_LOBPCG(SVDSCommonTests):
+
+    def setup_method(self):
+        self.solver = 'lobpcg'
+
+
+class Test_SVDS_PROPACK(SVDSCommonTests):
+
+    def setup_method(self):
+        self.solver = 'propack'
+
+    def test_svd_LM_ones_matrix(self):
+        message = ("PROPACK does not return orthonormal singular vectors "
+                   "associated with zero singular values.")
+        # There are some other issues with this matrix of all ones, e.g.
+        # `which='sm'` and `k=1` returns the largest singular value
+        pytest.xfail(message)
+
+    def test_svd_LM_zeros_matrix(self):
+        message = ("PROPACK does not return orthonormal singular vectors "
+                   "associated with zero singular values.")
+        pytest.xfail(message)
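# --- Editor's aside (illustrative sketch, not part of the upstream file) ---
# The three classes above fan the shared suite out over the backends purely
# by setting `self.solver`; the same comparison can be run by hand:
import numpy as np
from scipy.sparse.linalg import svds

rng = np.random.default_rng(0)
A = rng.random((100, 100))
for solver in ("arpack", "lobpcg", "propack"):
    _, s, _ = svds(A, k=3, solver=solver, random_state=0)
    print(solver, np.sort(s))   # backends should agree to solver tolerance
# --- end aside ---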