diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c975bacc61311a4dd020dc9baaedba3befe319ce --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/__init__.py @@ -0,0 +1,31 @@ +""" +========================================= +Clustering package (:mod:`scipy.cluster`) +========================================= + +.. currentmodule:: scipy.cluster + +.. toctree:: + :hidden: + + cluster.vq + cluster.hierarchy + +Clustering algorithms are useful in information theory, target detection, +communications, compression, and other areas. The `vq` module only +supports vector quantization and the k-means algorithms. + +The `hierarchy` module provides functions for hierarchical and +agglomerative clustering. Its features include generating hierarchical +clusters from distance matrices, +calculating statistics on clusters, cutting linkages +to generate flat clusters, and visualizing clusters with dendrograms. + +""" +__all__ = ['vq', 'hierarchy'] + +from . import vq, hierarchy + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96312c65bcedd12edcf86100fa51b15289642821 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0da5034c545ffd7b457836b549f305807d97bd25 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/__pycache__/vq.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/__pycache__/vq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b7bb1eb38cce8263c1b2d9d45ecad3554eee842 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/__pycache__/vq.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..2c26033c53e222410c7809efade512ef1346cb31 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c5117f66d4551253475ddde28e49f6f169d8fbb9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ed59337d4cf218405bd5c30219f86fcc937cde95 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/hierarchy.py b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/hierarchy.py new file mode 100644 index 0000000000000000000000000000000000000000..6bdafcc7d57650e05020d2a6784b98b827da4f18 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/hierarchy.py @@ -0,0 +1,4173 @@ +""" +Hierarchical clustering (:mod:`scipy.cluster.hierarchy`) +======================================================== + +.. currentmodule:: scipy.cluster.hierarchy + +These functions cut hierarchical clusterings into flat clusterings +or find the roots of the forest formed by a cut by providing the flat +cluster ids of each observation. + +.. autosummary:: + :toctree: generated/ + + fcluster + fclusterdata + leaders + +These are routines for agglomerative clustering. + +.. autosummary:: + :toctree: generated/ + + linkage + single + complete + average + weighted + centroid + median + ward + +These routines compute statistics on hierarchies. + +.. autosummary:: + :toctree: generated/ + + cophenet + from_mlab_linkage + inconsistent + maxinconsts + maxdists + maxRstat + to_mlab_linkage + +Routines for visualizing flat clusters. + +.. autosummary:: + :toctree: generated/ + + dendrogram + +These are data structures and routines for representing hierarchies as +tree objects. + +.. autosummary:: + :toctree: generated/ + + ClusterNode + leaves_list + to_tree + cut_tree + optimal_leaf_ordering + +These are predicates for checking the validity of linkage and +inconsistency matrices as well as for checking isomorphism of two +flat cluster assignments. + +.. autosummary:: + :toctree: generated/ + + is_valid_im + is_valid_linkage + is_isomorphic + is_monotonic + correspond + num_obs_linkage + +Utility routines for plotting: + +.. autosummary:: + :toctree: generated/ + + set_link_color_palette + +Utility classes: + +.. autosummary:: + :toctree: generated/ + + DisjointSet -- data structure for incremental connectivity queries + +""" +# Copyright (C) Damian Eads, 2007-2008. New BSD License. + +# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com) +# +# Author: Damian Eads +# Date: September 22, 2007 +# +# Copyright (c) 2007, 2008, Damian Eads +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# - Redistributions of source code must retain the above +# copyright notice, this list of conditions and the +# following disclaimer. +# - Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# - Neither the name of the author nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import warnings +import bisect +from collections import deque + +import numpy as np +from . import _hierarchy, _optimal_leaf_ordering +import scipy.spatial.distance as distance +from scipy._lib._array_api import array_namespace, _asarray, copy +from scipy._lib._disjoint_set import DisjointSet + + +_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3, + 'median': 4, 'ward': 5, 'weighted': 6} +_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward') + +__all__ = ['ClusterNode', 'DisjointSet', 'average', 'centroid', 'complete', + 'cophenet', 'correspond', 'cut_tree', 'dendrogram', 'fcluster', + 'fclusterdata', 'from_mlab_linkage', 'inconsistent', + 'is_isomorphic', 'is_monotonic', 'is_valid_im', 'is_valid_linkage', + 'leaders', 'leaves_list', 'linkage', 'maxRstat', 'maxdists', + 'maxinconsts', 'median', 'num_obs_linkage', 'optimal_leaf_ordering', + 'set_link_color_palette', 'single', 'to_mlab_linkage', 'to_tree', + 'ward', 'weighted'] + + +class ClusterWarning(UserWarning): + pass + + +def _warning(s): + warnings.warn('scipy.cluster: %s' % s, ClusterWarning, stacklevel=3) + + +def int_floor(arr, xp): + # array_api_strict is strict about not allowing `int()` on a float array. + # That's typically not needed, here it is - so explicitly convert + return int(xp.astype(xp.asarray(arr), xp.int64)) + + +def single(y): + """ + Perform single/min/nearest linkage on the condensed distance matrix ``y``. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + The linkage matrix. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import single, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = single(y) + >>> Z + array([[ 0., 1., 1., 2.], + [ 2., 12., 1., 3.], + [ 3., 4., 1., 2.], + [ 5., 14., 1., 3.], + [ 6., 7., 1., 2.], + [ 8., 16., 1., 3.], + [ 9., 10., 1., 2.], + [11., 18., 1., 3.], + [13., 15., 2., 6.], + [17., 20., 2., 9.], + [19., 21., 2., 12.]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. 
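 + + As a quick worked reading of the first row above (using the row layout + described in `linkage`): observations 0 and 1 are merged at distance 1.0 + into a new cluster of size 2, and that cluster receives id 12, since ids + 0-11 are taken by the twelve original observations: + + >>> Z[0] + array([0., 1., 1., 2.])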
+ + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 10, 11, 12, 4, 5, 6, 1, 2, 3], dtype=int32) + >>> fcluster(Z, 1, criterion='distance') + array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32) + >>> fcluster(Z, 2, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + """ + return linkage(y, method='single', metric='euclidean') + + +def complete(y): + """ + Perform complete/max/farthest point linkage on a condensed distance matrix. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + the `linkage` function documentation for more information + on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import complete, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = complete(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.41421356, 3. ], + [ 5. , 13. , 1.41421356, 3. ], + [ 8. , 14. , 1.41421356, 3. ], + [11. , 15. , 1.41421356, 3. ], + [16. , 17. , 4.12310563, 6. ], + [18. , 19. , 4.12310563, 6. ], + [20. , 21. , 5.65685425, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + >>> fcluster(Z, 1.5, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + >>> fcluster(Z, 4.5, criterion='distance') + array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 6, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + """ + return linkage(y, method='complete', metric='euclidean') + + +def average(y): + """ + Perform average/UPGMA linkage on a condensed distance matrix. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + `linkage` for more information on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. 
+ scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import average, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = average(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.20710678, 3. ], + [ 5. , 13. , 1.20710678, 3. ], + [ 8. , 14. , 1.20710678, 3. ], + [11. , 15. , 1.20710678, 3. ], + [16. , 17. , 3.39675184, 6. ], + [18. , 19. , 3.39675184, 6. ], + [20. , 21. , 4.09206523, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + >>> fcluster(Z, 1.5, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + >>> fcluster(Z, 4, criterion='distance') + array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 6, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='average', metric='euclidean') + + +def weighted(y): + """ + Perform weighted/WPGMA linkage on the condensed distance matrix. + + See `linkage` for more information on the return + structure and algorithm. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + `linkage` for more information on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import weighted, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = weighted(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 11. , 1. , 2. ], + [ 2. , 12. , 1.20710678, 3. ], + [ 8. , 13. , 1.20710678, 3. ], + [ 5. , 14. , 1.20710678, 3. ], + [10. , 15. , 1.20710678, 3. ], + [18. , 19. , 3.05595762, 6. ], + [16. , 17. , 3.32379407, 6. ], + [20. , 21. , 4.06357713, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. 
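 + + One point worth noting (per the ``method='weighted'`` update rule + documented in `linkage`): WPGMA averages the distances from the two + merged subclusters with equal weight, regardless of how many + observations each contains, unlike ``average``/UPGMA, which weights by + cluster size.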
+ + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 1, 2, 3, 10, 11, 12, 4, 6, 5], dtype=int32) + >>> fcluster(Z, 1.5, criterion='distance') + array([3, 3, 3, 1, 1, 1, 4, 4, 4, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 4, criterion='distance') + array([2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1], dtype=int32) + >>> fcluster(Z, 6, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='weighted', metric='euclidean') + + +def centroid(y): + """ + Perform centroid/UPGMC linkage. + + See `linkage` for more information on the input matrix, + return structure, and algorithm. + + The following are common calling conventions: + + 1. ``Z = centroid(y)`` + + Performs centroid/UPGMC linkage on the condensed distance + matrix ``y``. + + 2. ``Z = centroid(X)`` + + Performs centroid/UPGMC linkage on the observation matrix ``X`` + using Euclidean distance as the distance metric. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed + distance matrix is a flat array containing the upper + triangular of the distance matrix. This is the form that + ``pdist`` returns. Alternatively, a collection of + m observation vectors in n dimensions may be passed as + an m by n array. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + the `linkage` function documentation for more information + on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import centroid, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = centroid(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. ], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3.33333333, 6. ], + [16. , 17. , 3.33333333, 6. ], + [20. , 21. , 3.33333333, 12. ]]) # may vary + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32) # may vary + >>> fcluster(Z, 1.1, criterion='distance') + array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32) # may vary + >>> fcluster(Z, 2, criterion='distance') + array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32) # may vary + >>> fcluster(Z, 4, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. 
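 + + As the calling conventions above note, the same clustering can also be + computed directly from the observation matrix; a sketch equivalent to the + ``pdist`` pipeline (assuming the default Euclidean metric) is: + + >>> Z = centroid(X) # same result as centroid(pdist(X))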
+ + """ + return linkage(y, method='centroid', metric='euclidean') + + +def median(y): + """ + Perform median/WPGMC linkage. + + See `linkage` for more information on the return structure + and algorithm. + + The following are common calling conventions: + + 1. ``Z = median(y)`` + + Performs median/WPGMC linkage on the condensed distance matrix + ``y``. See ``linkage`` for more information on the return + structure and algorithm. + + 2. ``Z = median(X)`` + + Performs median/WPGMC linkage on the observation matrix ``X`` + using Euclidean distance as the distance metric. See `linkage` + for more information on the return structure and algorithm. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed + distance matrix is a flat array containing the upper + triangular of the distance matrix. This is the form that + ``pdist`` returns. Alternatively, a collection of + m observation vectors in n dimensions may be passed as + an m by n array. + + Returns + ------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import median, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = median(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. ], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3. , 6. ], + [16. , 17. , 3.5 , 6. ], + [20. , 21. , 3.25 , 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32) + >>> fcluster(Z, 1.1, criterion='distance') + array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32) + >>> fcluster(Z, 2, criterion='distance') + array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 4, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='median', metric='euclidean') + + +def ward(y): + """ + Perform Ward's linkage on a condensed distance matrix. + + See `linkage` for more information on the return structure + and algorithm. + + The following are common calling conventions: + + 1. ``Z = ward(y)`` + Performs Ward's linkage on the condensed distance matrix ``y``. + + 2. ``Z = ward(X)`` + Performs Ward's linkage on the observation matrix ``X`` using + Euclidean distance as the distance metric. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed + distance matrix is a flat array containing the upper + triangular of the distance matrix. This is the form that + ``pdist`` returns. 
Alternatively, a collection of + m observation vectors in n dimensions may be passed as + an m by n array. + + Returns + ------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. See + `linkage` for more information on the return structure and + algorithm. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = ward(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + >>> fcluster(Z, 1.1, criterion='distance') + array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32) + >>> fcluster(Z, 3, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + >>> fcluster(Z, 9, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='ward', metric='euclidean') + + +def linkage(y, method='single', metric='euclidean', optimal_ordering=False): + """ + Perform hierarchical/agglomerative clustering. + + The input y may be either a 1-D condensed distance matrix + or a 2-D array of observation vectors. + + If y is a 1-D condensed distance matrix, + then y must be a :math:`\\binom{n}{2}` sized + vector, where n is the number of original observations paired + in the distance matrix. The behavior of this function is very + similar to the MATLAB linkage function. + + A :math:`(n-1)` by 4 matrix ``Z`` is returned. At the + :math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and + ``Z[i, 1]`` are combined to form cluster :math:`n + i`. A + cluster with an index less than :math:`n` corresponds to one of + the :math:`n` original observations. The distance between + clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The + fourth value ``Z[i, 3]`` represents the number of original + observations in the newly formed cluster. + + The following linkage methods are used to compute the distance + :math:`d(s, t)` between two clusters :math:`s` and + :math:`t`. The algorithm begins with a forest of clusters that + have yet to be used in the hierarchy being formed. When two + clusters :math:`s` and :math:`t` from this forest are combined + into a single cluster :math:`u`, :math:`s` and :math:`t` are + removed from the forest, and :math:`u` is added to the + forest. 
When only one cluster remains in the forest, the algorithm + stops, and this cluster becomes the root. + + A distance matrix is maintained at each iteration. The ``d[i,j]`` + entry corresponds to the distance between cluster :math:`i` and + :math:`j` in the original forest. + + At each iteration, the algorithm must update the distance matrix + to reflect the distance between the newly formed cluster u and the + remaining clusters in the forest. + + Suppose there are :math:`|u|` original observations + :math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and + :math:`|v|` original observations :math:`v[0], \\ldots, v[|v|-1]` in + cluster :math:`v`. Recall that :math:`s` and :math:`t` are + combined to form cluster :math:`u`. Let :math:`v` be any + remaining cluster in the forest that is not :math:`u`. + + The following are methods for calculating the distance between the + newly formed cluster :math:`u` and each remaining cluster :math:`v`. + + * method='single' assigns + + .. math:: + d(u,v) = \\min(dist(u[i],v[j])) + + for all points :math:`i` in cluster :math:`u` and + :math:`j` in cluster :math:`v`. This is also known as the + Nearest Point Algorithm. + + * method='complete' assigns + + .. math:: + d(u, v) = \\max(dist(u[i],v[j])) + + for all points :math:`i` in cluster u and :math:`j` in + cluster :math:`v`. This is also known as the Farthest Point + Algorithm or Voor Hees Algorithm. + + * method='average' assigns + + .. math:: + d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])} + {(|u|*|v|)} + + for all points :math:`i` and :math:`j` where :math:`|u|` + and :math:`|v|` are the cardinalities of clusters :math:`u` + and :math:`v`, respectively. This is also called the UPGMA + algorithm. + + * method='weighted' assigns + + .. math:: + d(u,v) = (dist(s,v) + dist(t,v))/2 + + where cluster u was formed from clusters s and t, and v + is a remaining cluster in the forest (also called WPGMA). + + * method='centroid' assigns + + .. math:: + dist(s,t) = ||c_s-c_t||_2 + + where :math:`c_s` and :math:`c_t` are the centroids of + clusters :math:`s` and :math:`t`, respectively. When two + clusters :math:`s` and :math:`t` are combined into a new + cluster :math:`u`, the new centroid is computed over all the + original observations in clusters :math:`s` and :math:`t`. The + distance then becomes the Euclidean distance between the + centroid of :math:`u` and the centroid of a remaining cluster + :math:`v` in the forest. This is also known as the UPGMC + algorithm. + + * method='median' assigns :math:`d(s,t)` like the ``centroid`` + method. When two clusters :math:`s` and :math:`t` are combined + into a new cluster :math:`u`, the average of the centroids of s and t + gives the new centroid :math:`u`. This is also known as the + WPGMC algorithm. + + * method='ward' uses the Ward variance minimization algorithm. + The new entry :math:`d(u,v)` is computed as follows, + + .. math:: + + d(u,v) = \\sqrt{\\frac{|v|+|s|} + {T}d(v,s)^2 + + \\frac{|v|+|t|} + {T}d(v,t)^2 + - \\frac{|v|} + {T}d(s,t)^2} + + where :math:`u` is the newly joined cluster consisting of + clusters :math:`s` and :math:`t`, :math:`v` is an unused + cluster in the forest, :math:`T=|v|+|s|+|t|`, and + :math:`|*|` is the cardinality of its argument. This is also + known as the incremental algorithm. + + Warning: When the minimum distance pair in the forest is chosen, there + may be two or more pairs with the same minimum distance. This + implementation may choose a different minimum than the MATLAB + version. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. 
A condensed distance matrix + is a flat array containing the upper triangular of the distance matrix. + This is the form that ``pdist`` returns. Alternatively, a collection of + :math:`m` observation vectors in :math:`n` dimensions may be passed as + an :math:`m` by :math:`n` array. All elements of the condensed distance + matrix must be finite, i.e., no NaNs or infs. + method : str, optional + The linkage algorithm to use. See the ``Linkage Methods`` section below + for full descriptions. + metric : str or function, optional + The distance metric to use in the case that y is a collection of + observation vectors; ignored otherwise. See the ``pdist`` + function for a list of valid distance metrics. A custom distance + function can also be used. + optimal_ordering : bool, optional + If True, the linkage matrix will be reordered so that the distance + between successive leaves is minimal. This results in a more intuitive + tree structure when the data are visualized. Defaults to False, because + this algorithm can be slow, particularly on large datasets [2]_. See + also the `optimal_leaf_ordering` function. + + .. versionadded:: 1.0.0 + + Returns + ------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. + + Notes + ----- + 1. For method 'single', an optimized algorithm based on a minimum spanning + tree is implemented. It has time complexity :math:`O(n^2)`. + For methods 'complete', 'average', 'weighted' and 'ward', an algorithm + called nearest-neighbors chain is implemented. It also has time + complexity :math:`O(n^2)`. + For other methods, a naive algorithm is implemented with :math:`O(n^3)` + time complexity. + All algorithms use :math:`O(n^2)` memory. + Refer to [1]_ for details about the algorithms. + 2. Methods 'centroid', 'median', and 'ward' are correctly defined only if + the Euclidean pairwise metric is used. If `y` is passed as precomputed + pairwise distances, then it is the user's responsibility to ensure that + these distances are in fact Euclidean, otherwise the produced result + will be incorrect. + + See Also + -------- + scipy.spatial.distance.pdist : pairwise distance metrics + + References + ---------- + .. [1] Daniel Mullner, "Modern hierarchical, agglomerative clustering + algorithms", :arXiv:`1109.2378v1`. + .. [2] Ziv Bar-Joseph, David K. Gifford, Tommi S. Jaakkola, "Fast optimal + leaf ordering for hierarchical clustering", 2001. 
Bioinformatics + :doi:`10.1093/bioinformatics/17.suppl_1.S22` + + Examples + -------- + >>> from scipy.cluster.hierarchy import dendrogram, linkage + >>> from matplotlib import pyplot as plt + >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]] + + >>> Z = linkage(X, 'ward') + >>> fig = plt.figure(figsize=(25, 10)) + >>> dn = dendrogram(Z) + + >>> Z = linkage(X, 'single') + >>> fig = plt.figure(figsize=(25, 10)) + >>> dn = dendrogram(Z) + >>> plt.show() + """ + xp = array_namespace(y) + y = _asarray(y, order='C', dtype=xp.float64, xp=xp) + + if method not in _LINKAGE_METHODS: + raise ValueError(f"Invalid method: {method}") + + if method in _EUCLIDEAN_METHODS and metric != 'euclidean' and y.ndim == 2: + msg = f"`method={method}` requires the distance metric to be Euclidean" + raise ValueError(msg) + + if y.ndim == 1: + distance.is_valid_y(y, throw=True, name='y') + elif y.ndim == 2: + if (y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0) and + xp.all(y >= 0) and np.allclose(y, y.T)): + warnings.warn('The symmetric non-negative hollow observation ' + 'matrix looks suspiciously like an uncondensed ' + 'distance matrix', + ClusterWarning, stacklevel=2) + y = distance.pdist(y, metric) + y = xp.asarray(y) + else: + raise ValueError("`y` must be 1 or 2 dimensional.") + + if not xp.all(xp.isfinite(y)): + raise ValueError("The condensed distance matrix must contain only " + "finite values.") + + n = int(distance.num_obs_y(y)) + method_code = _LINKAGE_METHODS[method] + + y = np.asarray(y) + if method == 'single': + result = _hierarchy.mst_single_linkage(y, n) + elif method in ['complete', 'average', 'weighted', 'ward']: + result = _hierarchy.nn_chain(y, n, method_code) + else: + result = _hierarchy.fast_linkage(y, n, method_code) + result = xp.asarray(result) + + if optimal_ordering: + y = xp.asarray(y) + return optimal_leaf_ordering(result, y) + else: + return result + + +class ClusterNode: + """ + A tree node class for representing a cluster. + + Leaf nodes correspond to original observations, while non-leaf nodes + correspond to non-singleton clusters. + + The `to_tree` function converts a matrix returned by the linkage + function into an easy-to-use tree representation. + + All parameter names are also attributes. + + Parameters + ---------- + id : int + The node id. + left : ClusterNode instance, optional + The left child tree node. + right : ClusterNode instance, optional + The right child tree node. + dist : float, optional + Distance for this cluster in the linkage matrix. + count : int, optional + The number of samples in this cluster. + + See Also + -------- + to_tree : for converting a linkage matrix ``Z`` into a tree object. + + """ + + def __init__(self, id, left=None, right=None, dist=0, count=1): + if id < 0: + raise ValueError('The id must be non-negative.') + if dist < 0: + raise ValueError('The distance must be non-negative.') + if (left is None and right is not None) or \ + (left is not None and right is None): + raise ValueError('Only full or proper binary trees are permitted.' 
+ ' This node has one child.') + if count < 1: + raise ValueError('A cluster must contain at least one original ' + 'observation.') + self.id = id + self.left = left + self.right = right + self.dist = dist + if self.left is None: + self.count = count + else: + self.count = left.count + right.count + + def __lt__(self, node): + if not isinstance(node, ClusterNode): + raise ValueError("Can't compare ClusterNode " + f"to type {type(node)}") + return self.dist < node.dist + + def __gt__(self, node): + if not isinstance(node, ClusterNode): + raise ValueError("Can't compare ClusterNode " + f"to type {type(node)}") + return self.dist > node.dist + + def __eq__(self, node): + if not isinstance(node, ClusterNode): + raise ValueError("Can't compare ClusterNode " + f"to type {type(node)}") + return self.dist == node.dist + + def get_id(self): + """ + The identifier of the target node. + + For ``0 <= i < n``, `i` corresponds to original observation ``i``. + For ``n <= i < 2n-1``, `i` corresponds to the non-singleton cluster + formed at iteration ``i-n``. + + Returns + ------- + id : int + The identifier of the target node. + + """ + return self.id + + def get_count(self): + """ + The number of leaf nodes (original observations) belonging to + the target node. If the target node is a leaf, 1 is + returned. + + Returns + ------- + count : int + The number of leaf nodes below the target node. + + """ + return self.count + + def get_left(self): + """ + Return a reference to the left child tree object. + + Returns + ------- + left : ClusterNode + The left child of the target node. If the node is a leaf, + None is returned. + + """ + return self.left + + def get_right(self): + """ + Return a reference to the right child tree object. + + Returns + ------- + right : ClusterNode + The right child of the target node. If the node is a leaf, + None is returned. + + """ + return self.right + + def is_leaf(self): + """ + Return True if the target node is a leaf. + + Returns + ------- + leafness : bool + True if the target node is a leaf node. + + """ + return self.left is None + + def pre_order(self, func=(lambda x: x.id)): + """ + Perform pre-order traversal without recursive function calls. + + When a leaf node is first encountered, ``func`` is called with + the leaf node as its argument, and its result is appended to + the list. + + For example, the statement:: + + ids = root.pre_order(lambda x: x.id) + + returns a list of the node ids corresponding to the leaf nodes + of the tree as they appear from left to right. + + Parameters + ---------- + func : function + Applied to each leaf ClusterNode object in the pre-order traversal. + Given the ``i``-th leaf node in the pre-order traversal ``n[i]``, + the result of ``func(n[i])`` is stored in ``L[i]``. If not + provided, the index of the original observation to which the node + corresponds is used. + + Returns + ------- + L : list + The pre-order traversal. + + """ + # Do a preorder traversal, caching the result. To avoid having to do + # recursion, we'll store the previous index we've visited in a vector. 
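 + # curNode below acts as an explicit stack (at most 2n slots) with k as + # the stack pointer; the lvisited/rvisited sets record which internal + # nodes have already had their left/right child pushed, standing in + # for the state that recursive calls would otherwise carry.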
+ n = self.count + + curNode = [None] * (2 * n) + lvisited = set() + rvisited = set() + curNode[0] = self + k = 0 + preorder = [] + while k >= 0: + nd = curNode[k] + ndid = nd.id + if nd.is_leaf(): + preorder.append(func(nd)) + k = k - 1 + else: + if ndid not in lvisited: + curNode[k + 1] = nd.left + lvisited.add(ndid) + k = k + 1 + elif ndid not in rvisited: + curNode[k + 1] = nd.right + rvisited.add(ndid) + k = k + 1 + # If we've visited the left and right of this non-leaf + # node already, go up in the tree. + else: + k = k - 1 + + return preorder + + +_cnode_bare = ClusterNode(0) +_cnode_type = type(ClusterNode) + + +def _order_cluster_tree(Z): + """ + Return clustering nodes in bottom-up order by distance. + + Parameters + ---------- + Z : scipy.cluster.linkage array + The linkage matrix. + + Returns + ------- + nodes : list + A list of ClusterNode objects. + """ + q = deque() + tree = to_tree(Z) + q.append(tree) + nodes = [] + + while q: + node = q.popleft() + if not node.is_leaf(): + bisect.insort_left(nodes, node) + q.append(node.get_right()) + q.append(node.get_left()) + return nodes + + +def cut_tree(Z, n_clusters=None, height=None): + """ + Given a linkage matrix Z, return the cut tree. + + Parameters + ---------- + Z : scipy.cluster.linkage array + The linkage matrix. + n_clusters : array_like, optional + Number of clusters in the tree at the cut point. + height : array_like, optional + The height at which to cut the tree. Only possible for ultrametric + trees. + + Returns + ------- + cutree : array + An array indicating group membership at each agglomeration step. I.e., + for a full cut tree, in the first column each data point is in its own + cluster. At the next step, two nodes are merged. Finally, all + singleton and non-singleton clusters are in one group. If `n_clusters` + or `height` are given, the columns correspond to the columns of + `n_clusters` or `height`. 
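 + + For instance, ``cut_tree(Z, n_clusters=5)`` returns a single-column + array assigning each observation to one of 5 groups, while passing a + list (as in the example below) yields one column per requested cut.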
+ + Examples + -------- + >>> from scipy import cluster + >>> import numpy as np + >>> from numpy.random import default_rng + >>> rng = default_rng() + >>> X = rng.random((50, 4)) + >>> Z = cluster.hierarchy.ward(X) + >>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10]) + >>> cutree[:10] + array([[0, 0], + [1, 1], + [2, 2], + [3, 3], + [3, 4], + [2, 2], + [0, 0], + [1, 5], + [3, 6], + [4, 7]]) # random + + """ + xp = array_namespace(Z) + nobs = num_obs_linkage(Z) + nodes = _order_cluster_tree(Z) + + if height is not None and n_clusters is not None: + raise ValueError("At least one of either height or n_clusters " + "must be None") + elif height is None and n_clusters is None: # return the full cut tree + cols_idx = xp.arange(nobs) + elif height is not None: + height = xp.asarray(height) + heights = xp.asarray([x.dist for x in nodes]) + cols_idx = xp.searchsorted(heights, height) + else: + n_clusters = xp.asarray(n_clusters) + cols_idx = nobs - xp.searchsorted(xp.arange(nobs), n_clusters) + + try: + n_cols = len(cols_idx) + except TypeError: # scalar + n_cols = 1 + cols_idx = xp.asarray([cols_idx]) + + groups = xp.zeros((n_cols, nobs), dtype=xp.int64) + last_group = xp.arange(nobs) + if 0 in cols_idx: + groups[0] = last_group + + for i, node in enumerate(nodes): + idx = node.pre_order() + this_group = copy(last_group, xp=xp) + # TODO ARRAY_API complex indexing not supported + this_group[idx] = xp.min(last_group[idx]) + this_group[this_group > xp.max(last_group[idx])] -= 1 + if i + 1 in cols_idx: + groups[np.nonzero(i + 1 == cols_idx)[0]] = this_group + last_group = this_group + + return groups.T + + +def to_tree(Z, rd=False): + """ + Convert a linkage matrix into an easy-to-use tree object. + + The reference to the root `ClusterNode` object is returned (by default). + + Each `ClusterNode` object has a ``left``, ``right``, ``dist``, ``id``, + and ``count`` attribute. The left and right attributes point to + ClusterNode objects that were combined to generate the cluster. + If both are None then the `ClusterNode` object is a leaf node, its count + must be 1, and its distance is meaningless but set to 0. + + *Note: This function is provided for the convenience of the library + user. ClusterNodes are not used as input to any of the functions in this + library.* + + Parameters + ---------- + Z : ndarray + The linkage matrix in proper form (see the `linkage` + function documentation). + rd : bool, optional + When False (default), a reference to the root `ClusterNode` object is + returned. Otherwise, a tuple ``(r, d)`` is returned. ``r`` is a + reference to the root node while ``d`` is a list of `ClusterNode` + objects - one per original entry in the linkage matrix plus entries + for all clustering steps. If a cluster id is + less than the number of samples ``n`` in the data that the linkage + matrix describes, then it corresponds to a singleton cluster (leaf + node). + See `linkage` for more information on the assignment of cluster ids + to clusters. + + Returns + ------- + tree : ClusterNode or tuple (ClusterNode, list of ClusterNode) + If ``rd`` is False, a `ClusterNode`. + If ``rd`` is True, a list of length ``2*n - 1``, with ``n`` the number + of samples. See the description of `rd` above for more details. 
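 + + As a small arithmetic check: a linkage over ``n = 5`` observations + yields a list of length ``2*5 - 1 = 9`` - five leaves plus four + internal nodes - matching the ``len(nodelist)`` result in the example + below.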
+ + See Also + -------- + linkage, is_valid_linkage, ClusterNode + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster import hierarchy + >>> rng = np.random.default_rng() + >>> x = rng.random((5, 2)) + >>> Z = hierarchy.linkage(x) + >>> hierarchy.to_tree(Z) + <scipy.cluster.hierarchy.ClusterNode object at ...> + >>> rootnode, nodelist = hierarchy.to_tree(Z, rd=True) + >>> rootnode + <scipy.cluster.hierarchy.ClusterNode object at ...> + >>> len(nodelist) + 9 + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='c', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + # Number of original objects is equal to the number of rows plus 1. + n = Z.shape[0] + 1 + + # Create a list full of None's to store the node objects + d = [None] * (n * 2 - 1) + + # Create the nodes corresponding to the n original objects. + for i in range(0, n): + d[i] = ClusterNode(i) + + nd = None + + for i in range(Z.shape[0]): + row = Z[i, :] + + fi = int_floor(row[0], xp) + fj = int_floor(row[1], xp) + if fi > i + n: + raise ValueError(('Corrupt matrix Z. Index to derivative cluster ' + 'is used before it is formed. See row %d, ' + 'column 0') % fi) + if fj > i + n: + raise ValueError(('Corrupt matrix Z. Index to derivative cluster ' + 'is used before it is formed. See row %d, ' + 'column 1') % fj) + + nd = ClusterNode(i + n, d[fi], d[fj], row[2]) + # ^ id ^ left ^ right ^ dist + if row[3] != nd.count: + raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is ' + 'incorrect.') % i) + d[n + i] = nd + + if rd: + return (nd, d) + else: + return nd + + +def optimal_leaf_ordering(Z, y, metric='euclidean'): + """ + Given a linkage matrix Z and a distance matrix, reorder the tree leaves + to minimize the distance between adjacent leaves. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. See + `linkage` for more information on the return structure and + algorithm. + y : ndarray + The condensed distance matrix from which Z was generated. + Alternatively, a collection of m observation vectors in n + dimensions may be passed as an m by n array. + metric : str or function, optional + The distance metric to use in the case that y is a collection of + observation vectors; ignored otherwise. See the ``pdist`` + function for a list of valid distance metrics. A custom distance + function can also be used. + + Returns + ------- + Z_ordered : ndarray + A copy of the linkage matrix Z, reordered to minimize the distance + between adjacent leaves. 
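 + + The tree topology itself is unchanged by this step; the algorithm only + flips the left/right orientation of internal merges so that similar + leaves end up next to each other, which is why `leaves_list` in the + example below reports the same set of leaf ids in a different order.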
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster import hierarchy + >>> rng = np.random.default_rng() + >>> X = rng.standard_normal((10, 10)) + >>> Z = hierarchy.ward(X) + >>> hierarchy.leaves_list(Z) + array([0, 3, 1, 9, 2, 5, 7, 4, 6, 8], dtype=int32) + >>> hierarchy.leaves_list(hierarchy.optimal_leaf_ordering(Z, X)) + array([3, 0, 2, 5, 7, 4, 8, 6, 9, 1], dtype=int32) + + """ + xp = array_namespace(Z, y) + Z = _asarray(Z, order='C', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + y = _asarray(y, order='C', dtype=xp.float64, xp=xp) + + if y.ndim == 1: + distance.is_valid_y(y, throw=True, name='y') + elif y.ndim == 2: + if (y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0) and + np.all(y >= 0) and np.allclose(y, y.T)): + warnings.warn('The symmetric non-negative hollow observation ' + 'matrix looks suspiciously like an uncondensed ' + 'distance matrix', + ClusterWarning, stacklevel=2) + y = distance.pdist(y, metric) + y = xp.asarray(y) + else: + raise ValueError("`y` must be 1 or 2 dimensional.") + + if not xp.all(xp.isfinite(y)): + raise ValueError("The condensed distance matrix must contain only " + "finite values.") + + Z = np.asarray(Z) + y = np.asarray(y) + return xp.asarray(_optimal_leaf_ordering.optimal_leaf_ordering(Z, y)) + + +def cophenet(Z, Y=None): + """ + Calculate the cophenetic distances between each observation in + the hierarchical clustering defined by the linkage ``Z``. + + Suppose ``p`` and ``q`` are original observations in + disjoint clusters ``s`` and ``t``, respectively, and + ``s`` and ``t`` are joined by a direct parent cluster + ``u``. The cophenetic distance between observations + ``p`` and ``q`` is simply the distance between + clusters ``s`` and ``t``. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded as an array + (see `linkage` function). + Y : ndarray (optional) + Calculates the cophenetic correlation coefficient ``c`` of a + hierarchical clustering defined by the linkage matrix `Z` + of a set of :math:`n` observations in :math:`m` + dimensions. `Y` is the condensed distance matrix from which + `Z` was generated. + + Returns + ------- + c : ndarray + The cophenetic correlation coefficient (if ``Y`` is passed). + d : ndarray + The cophenetic distance matrix in condensed form. The + :math:`ij`-th entry is the cophenetic distance between + original observations :math:`i` and :math:`j`. + + See Also + -------- + linkage : + for a description of what a linkage matrix is. + scipy.spatial.distance.squareform : + transforming condensed matrices into square ones. + + Examples + -------- + >>> from scipy.cluster.hierarchy import single, cophenet + >>> from scipy.spatial.distance import pdist, squareform + + Given a dataset ``X`` and a linkage matrix ``Z``, the cophenetic distance + between two points of ``X`` is the distance between the two largest + distinct clusters that contain each of the points: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... 
[4, 4], [3, 4], [4, 3]] + + ``X`` corresponds to this dataset:: + + x x x x + x x + + x x + x x x x + + >>> Z = single(pdist(X)) + >>> Z + array([[ 0., 1., 1., 2.], + [ 2., 12., 1., 3.], + [ 3., 4., 1., 2.], + [ 5., 14., 1., 3.], + [ 6., 7., 1., 2.], + [ 8., 16., 1., 3.], + [ 9., 10., 1., 2.], + [11., 18., 1., 3.], + [13., 15., 2., 6.], + [17., 20., 2., 9.], + [19., 21., 2., 12.]]) + >>> cophenet(Z) + array([1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 2., 2., + 2., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 1., 1., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 1., 1., 1.]) + + The output of the `scipy.cluster.hierarchy.cophenet` method is + represented in condensed form. We can use + `scipy.spatial.distance.squareform` to see the output as a + regular matrix (where each element ``ij`` denotes the cophenetic distance + between each ``i``, ``j`` pair of points in ``X``): + + >>> squareform(cophenet(Z)) + array([[0., 1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.], + [1., 0., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.], + [1., 1., 0., 2., 2., 2., 2., 2., 2., 2., 2., 2.], + [2., 2., 2., 0., 1., 1., 2., 2., 2., 2., 2., 2.], + [2., 2., 2., 1., 0., 1., 2., 2., 2., 2., 2., 2.], + [2., 2., 2., 1., 1., 0., 2., 2., 2., 2., 2., 2.], + [2., 2., 2., 2., 2., 2., 0., 1., 1., 2., 2., 2.], + [2., 2., 2., 2., 2., 2., 1., 0., 1., 2., 2., 2.], + [2., 2., 2., 2., 2., 2., 1., 1., 0., 2., 2., 2.], + [2., 2., 2., 2., 2., 2., 2., 2., 2., 0., 1., 1.], + [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 0., 1.], + [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 0.]]) + + In this example, the cophenetic distance between points in ``X`` that are + very close (i.e., in the same corner) is 1. For other pairs of points, it + is 2, because those points lie in clusters at different + corners - thus, the distance between these clusters is larger. + + """ + xp = array_namespace(Z, Y) + # Ensure float64 C-contiguous array. Cython code doesn't deal with striding. + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + n = Z.shape[0] + 1 + zz = np.zeros((n * (n-1)) // 2, dtype=np.float64) + + Z = np.asarray(Z) + _hierarchy.cophenetic_distances(Z, zz, int(n)) + zz = xp.asarray(zz) + if Y is None: + return zz + + Y = _asarray(Y, order='C', xp=xp) + distance.is_valid_y(Y, throw=True, name='Y') + + z = xp.mean(zz) + y = xp.mean(Y) + Yy = Y - y + Zz = zz - z + numerator = (Yy * Zz) + denomA = Yy**2 + denomB = Zz**2 + c = xp.sum(numerator) / xp.sqrt(xp.sum(denomA) * xp.sum(denomB)) + return (c, zz) + + +def inconsistent(Z, d=2): + r""" + Calculate inconsistency statistics on a linkage matrix. + + Parameters + ---------- + Z : ndarray + The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical + clustering). See `linkage` documentation for more information on its + form. + d : int, optional + The number of links up to `d` levels below each non-singleton cluster. + + Returns + ------- + R : ndarray + A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link + statistics for the non-singleton cluster ``i``. The link statistics are + computed over the link heights for links :math:`d` levels below the + cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard + deviation of the link heights, respectively; ``R[i,2]`` is the number + of links included in the calculation; and ``R[i,3]`` is the + inconsistency coefficient, + + .. 
math:: \frac{\mathtt{Z[i,2]} - \mathtt{R[i,0]}} {R[i,1]} + + Notes + ----- + This function behaves similarly to the MATLAB(TM) ``inconsistent`` + function. + + Examples + -------- + >>> from scipy.cluster.hierarchy import inconsistent, linkage + >>> from matplotlib import pyplot as plt + >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]] + >>> Z = linkage(X, 'ward') + >>> print(Z) + [[ 5. 6. 0. 2. ] + [ 2. 7. 0. 2. ] + [ 0. 4. 1. 2. ] + [ 1. 8. 1.15470054 3. ] + [ 9. 10. 2.12132034 4. ] + [ 3. 12. 4.11096096 5. ] + [11. 13. 14.07183949 8. ]] + >>> inconsistent(Z) + array([[ 0. , 0. , 1. , 0. ], + [ 0. , 0. , 1. , 0. ], + [ 1. , 0. , 1. , 0. ], + [ 0.57735027, 0.81649658, 2. , 0.70710678], + [ 1.04044011, 1.06123822, 3. , 1.01850858], + [ 3.11614065, 1.40688837, 2. , 0.70710678], + [ 6.44583366, 6.76770586, 3. , 1.12682288]]) + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + if (not d == np.floor(d)) or d < 0: + raise ValueError('The second argument d must be a nonnegative ' + 'integer value.') + + n = Z.shape[0] + 1 + R = np.zeros((n - 1, 4), dtype=np.float64) + + Z = np.asarray(Z) + _hierarchy.inconsistent(Z, R, int(n), int(d)) + R = xp.asarray(R) + return R + + +def from_mlab_linkage(Z): + """ + Convert a linkage matrix generated by MATLAB(TM) to a new + linkage matrix compatible with this module. + + The conversion does two things: + + * the indices are converted from ``1..N`` to ``0..(N-1)`` form, + and + + * a fourth column ``Z[:,3]`` is added where ``Z[i,3]`` represents the + number of original observations (leaves) in the non-singleton + cluster ``i``. + + This function is useful when loading in linkages from legacy data + files generated by MATLAB. + + Parameters + ---------- + Z : ndarray + A linkage matrix generated by MATLAB(TM). + + Returns + ------- + ZS : ndarray + A linkage matrix compatible with ``scipy.cluster.hierarchy``. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + to_mlab_linkage : transform from SciPy to MATLAB format. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster.hierarchy import ward, from_mlab_linkage + + Given a linkage matrix in MATLAB format ``mZ``, we can use + `scipy.cluster.hierarchy.from_mlab_linkage` to import + it into SciPy format: + + >>> mZ = np.array([[1, 2, 1], [4, 5, 1], [7, 8, 1], + ... [10, 11, 1], [3, 13, 1.29099445], + ... [6, 14, 1.29099445], + ... [9, 15, 1.29099445], + ... [12, 16, 1.29099445], + ... [17, 18, 5.77350269], + ... [19, 20, 5.77350269], + ... [21, 22, 8.16496581]]) + + >>> Z = from_mlab_linkage(mZ) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [ 11. , 15. , 1.29099445, 3. ], + [ 16. , 17. , 5.77350269, 6. ], + [ 18. , 19. , 5.77350269, 6. ], + [ 20. , 21. , 8.16496581, 12. ]]) + + As expected, the linkage matrix ``Z`` returned includes an + additional column counting the number of original samples in + each cluster. Also, all cluster indices are reduced by 1 + (MATLAB format uses 1-indexing, whereas SciPy uses 0-indexing). + + """ + xp = array_namespace(Z) + Z = _asarray(Z, dtype=xp.float64, order='C', xp=xp) + Zs = Z.shape + + # If it's empty, return it. 
+ if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0): + return copy(Z, xp=xp) + + if len(Zs) != 2: + raise ValueError("The linkage array must be rectangular.") + + # If it contains no rows, return it. + if Zs[0] == 0: + return copy(Z, xp=xp) + + Zpart = copy(Z, xp=xp) + if xp.min(Zpart[:, 0:2]) != 1.0 and xp.max(Zpart[:, 0:2]) != 2 * Zs[0]: + raise ValueError('The format of the indices is not 1..N') + + Zpart[:, 0:2] -= 1.0 + CS = np.zeros((Zs[0],), dtype=np.float64) + Zpart = np.asarray(Zpart) + _hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1) + res = np.hstack([Zpart, CS.reshape(Zs[0], 1)]) + return xp.asarray(res) + + +def to_mlab_linkage(Z): + """ + Convert a linkage matrix to a MATLAB(TM) compatible one. + + Converts a linkage matrix ``Z`` generated by the linkage function + of this module to a MATLAB(TM) compatible one. The returned linkage + matrix has the last column removed and the cluster indices are + converted to ``1..N`` indexing. + + Parameters + ---------- + Z : ndarray + A linkage matrix generated by ``scipy.cluster.hierarchy``. + + Returns + ------- + to_mlab_linkage : ndarray + A linkage matrix compatible with MATLAB(TM)'s hierarchical + clustering functions. + + The returned linkage matrix has the last column removed + and the cluster indices are converted to ``1..N`` indexing. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + from_mlab_linkage : transform from MATLAB to SciPy format. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, to_mlab_linkage + >>> from scipy.spatial.distance import pdist + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + + After a linkage matrix ``Z`` has been created, we can use + `scipy.cluster.hierarchy.to_mlab_linkage` to convert it + into MATLAB format: + + >>> mZ = to_mlab_linkage(Z) + >>> mZ + array([[ 1. , 2. , 1. ], + [ 4. , 5. , 1. ], + [ 7. , 8. , 1. ], + [ 10. , 11. , 1. ], + [ 3. , 13. , 1.29099445], + [ 6. , 14. , 1.29099445], + [ 9. , 15. , 1.29099445], + [ 12. , 16. , 1.29099445], + [ 17. , 18. , 5.77350269], + [ 19. , 20. , 5.77350269], + [ 21. , 22. , 8.16496581]]) + + The new linkage matrix ``mZ`` uses 1-indexing for all the + clusters (instead of 0-indexing). Also, the last column of + the original linkage matrix has been dropped. + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + Zs = Z.shape + if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0): + return copy(Z, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + ZP = copy(Z[:, 0:3], xp=xp) + ZP[:, 0:2] += 1.0 + + return ZP + + +def is_monotonic(Z): + """ + Return True if the linkage passed is monotonic. + + The linkage is monotonic if, for every pair of clusters :math:`s` and :math:`t` + joined, the distance between them is no less than the distance + between any previously joined clusters. + + Parameters + ---------- + Z : ndarray + The linkage matrix to check for monotonicity. + + Returns + ------- + b : bool + A boolean indicating whether the linkage is monotonic.
+ + See Also + -------- + linkage : for a description of what a linkage matrix is. + + Examples + -------- + >>> from scipy.cluster.hierarchy import median, ward, is_monotonic + >>> from scipy.spatial.distance import pdist + + By definition, some hierarchical clustering algorithms - such as + `scipy.cluster.hierarchy.ward` - produce monotonic assignments of + samples to clusters; however, this is not always true for other + hierarchical methods - e.g. `scipy.cluster.hierarchy.median`. + + Given a linkage matrix ``Z`` (as the result of a hierarchical clustering + method), we can test programmatically whether it has the monotonicity + property or not, using `scipy.cluster.hierarchy.is_monotonic`: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + >>> is_monotonic(Z) + True + + >>> Z = median(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. ], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3. , 6. ], + [16. , 17. , 3.5 , 6. ], + [20. , 21. , 3.25 , 12. ]]) + >>> is_monotonic(Z) + False + + Note that this method is equivalent to just verifying that the distances + in the third column of the linkage matrix appear in a monotonically + increasing order. + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='c', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + # We expect each value to be no less than its predecessor. + return xp.all(Z[1:, 2] >= Z[:-1, 2]) + + +def is_valid_im(R, warning=False, throw=False, name=None): + """Return True if the inconsistency matrix passed is valid. + + It must be an :math:`n` by 4 array of doubles. The standard + deviations ``R[:,1]`` must be nonnegative. The link counts + ``R[:,2]`` must be positive and no greater than :math:`n-1`. + + Parameters + ---------- + R : ndarray + The inconsistency matrix to check for validity. + warning : bool, optional + When True, issues a Python warning if the linkage + matrix passed is invalid. + throw : bool, optional + When True, throws a Python exception if the linkage + matrix passed is invalid. + name : str, optional + This string refers to the variable name of the invalid + linkage matrix. + + Returns + ------- + b : bool + True if the inconsistency matrix is valid. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + inconsistent : for the creation of an inconsistency matrix. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, inconsistent, is_valid_im + >>> from scipy.spatial.distance import pdist + + Given a data set ``X``, we can apply a clustering method to obtain a + linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can + also be used to obtain the inconsistency matrix ``R`` associated with + this clustering process: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> R = inconsistent(Z) + >>> Z + array([[ 0. , 1. , 1. , 2.
], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + >>> R + array([[1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1.14549722, 0.20576415, 2. , 0.70710678], + [1.14549722, 0.20576415, 2. , 0.70710678], + [1.14549722, 0.20576415, 2. , 0.70710678], + [1.14549722, 0.20576415, 2. , 0.70710678], + [2.78516386, 2.58797734, 3. , 1.15470054], + [2.78516386, 2.58797734, 3. , 1.15470054], + [6.57065706, 1.38071187, 3. , 1.15470054]]) + + Now we can use `scipy.cluster.hierarchy.is_valid_im` to verify that + ``R`` is correct: + + >>> is_valid_im(R) + True + + However, if ``R`` is wrongly constructed (e.g., one of the standard + deviations is set to a negative value), then the check will fail: + + >>> R[-1,1] = R[-1,1] * -1 + >>> is_valid_im(R) + False + + """ + xp = array_namespace(R) + R = _asarray(R, order='c', xp=xp) + valid = True + name_str = "%r " % name if name else '' + try: + if R.dtype != xp.float64: + raise TypeError('Inconsistency matrix %smust contain doubles ' + '(double).' % name_str) + if len(R.shape) != 2: + raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. ' + 'be two-dimensional).' % name_str) + if R.shape[1] != 4: + raise ValueError('Inconsistency matrix %smust have 4 columns.' % + name_str) + if R.shape[0] < 1: + raise ValueError('Inconsistency matrix %smust have at least one ' + 'row.' % name_str) + if xp.any(R[:, 0] < 0): + raise ValueError('Inconsistency matrix %scontains negative link ' + 'height means.' % name_str) + if xp.any(R[:, 1] < 0): + raise ValueError('Inconsistency matrix %scontains negative link ' + 'height standard deviations.' % name_str) + if xp.any(R[:, 2] < 0): + raise ValueError('Inconsistency matrix %scontains negative link ' + 'counts.' % name_str) + except Exception as e: + if throw: + raise + if warning: + _warning(str(e)) + valid = False + + return valid + + +def is_valid_linkage(Z, warning=False, throw=False, name=None): + """ + Check the validity of a linkage matrix. + + A linkage matrix is valid if it is a 2-D array (type double) + with :math:`n` rows and 4 columns. The first two columns must contain + indices between 0 and :math:`2n-1`. For a given row ``i``, the following + two expressions have to hold: + + .. math:: + + 0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1 + 0 \\leq \\mathtt{Z[i,1]} \\leq i+n-1 + + I.e., a cluster cannot join another cluster unless the cluster being joined + has been generated. + + Parameters + ---------- + Z : array_like + Linkage matrix. + warning : bool, optional + When True, issues a Python warning if the linkage + matrix passed is invalid. + throw : bool, optional + When True, throws a Python exception if the linkage + matrix passed is invalid. + name : str, optional + This string refers to the variable name of the invalid + linkage matrix. + + Returns + ------- + b : bool + True if the linkage matrix is valid. + + See Also + -------- + linkage : for a description of what a linkage matrix is.
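+ + Notes + ----- + As a rough sketch (not the exact implementation), the structural part of + this check verifies that each row only merges clusters that already + exist:: + + n = Z.shape[0] + 1 + ok = all(max(Z[i, 0], Z[i, 1]) < n + i for i in range(n - 1)) + + on top of the shape, dtype, and non-negativity requirements described + above.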
+ + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, is_valid_linkage + >>> from scipy.spatial.distance import pdist + + All linkage matrices generated by the clustering methods in this module + will be valid (i.e., they will have the appropriate dimensions and the two + required expressions will hold for all the rows). + + We can check this using `scipy.cluster.hierarchy.is_valid_linkage`: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + >>> is_valid_linkage(Z) + True + + However, if we create a linkage matrix in a wrong way - or if we modify + a valid one so that any of the required expressions no longer + holds, then the check will fail: + + >>> Z[3][1] = 20 # the cluster number 20 is not defined at this point + >>> is_valid_linkage(Z) + False + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='c', xp=xp) + valid = True + name_str = "%r " % name if name else '' + try: + if Z.dtype != xp.float64: + raise TypeError('Linkage matrix %smust contain doubles.' % name_str) + if len(Z.shape) != 2: + raise ValueError('Linkage matrix %smust have shape=2 (i.e. be ' + 'two-dimensional).' % name_str) + if Z.shape[1] != 4: + raise ValueError('Linkage matrix %smust have 4 columns.' % name_str) + if Z.shape[0] == 0: + raise ValueError('Linkage must be computed on at least two ' + 'observations.') + n = Z.shape[0] + if n > 1: + if (xp.any(Z[:, 0] < 0) or xp.any(Z[:, 1] < 0)): + raise ValueError('Linkage %scontains negative indices.' % + name_str) + if xp.any(Z[:, 2] < 0): + raise ValueError('Linkage %scontains negative distances.' % + name_str) + if xp.any(Z[:, 3] < 0): + raise ValueError('Linkage %scontains negative counts.' % + name_str) + if _check_hierarchy_uses_cluster_before_formed(Z): + raise ValueError('Linkage %suses non-singleton cluster before ' + 'it is formed.' % name_str) + if _check_hierarchy_uses_cluster_more_than_once(Z): + raise ValueError('Linkage %suses the same cluster more than once.' + % name_str) + except Exception as e: + if throw: + raise + if warning: + _warning(str(e)) + valid = False + + return valid + + +def _check_hierarchy_uses_cluster_before_formed(Z): + n = Z.shape[0] + 1 + for i in range(0, n - 1): + if Z[i, 0] >= n + i or Z[i, 1] >= n + i: + return True + return False + + +def _check_hierarchy_uses_cluster_more_than_once(Z): + n = Z.shape[0] + 1 + chosen = set() + for i in range(0, n - 1): + used_more_than_once = ( + (float(Z[i, 0]) in chosen) + or (float(Z[i, 1]) in chosen) + or Z[i, 0] == Z[i, 1] + ) + if used_more_than_once: + return True + chosen.add(float(Z[i, 0])) + chosen.add(float(Z[i, 1])) + return False + + +def _check_hierarchy_not_all_clusters_used(Z): + n = Z.shape[0] + 1 + chosen = set() + for i in range(0, n - 1): + chosen.add(int(Z[i, 0])) + chosen.add(int(Z[i, 1])) + must_chosen = set(range(0, 2 * n - 2)) + return len(must_chosen.difference(chosen)) > 0 + + +def num_obs_linkage(Z): + """ + Return the number of original observations of the linkage matrix passed. + + Parameters + ---------- + Z : ndarray + The linkage matrix on which to perform the operation.
+ + Returns + ------- + n : int + The number of original observations in the linkage. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, num_obs_linkage + >>> from scipy.spatial.distance import pdist + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + + ``Z`` is a linkage matrix obtained after using the Ward clustering method + with ``X``, a dataset with 12 data points. + + >>> num_obs_linkage(Z) + 12 + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='c', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + return (Z.shape[0] + 1) + + +def correspond(Z, Y): + """ + Check for correspondence between linkage and condensed distance matrices. + + They must have the same number of original observations for + the check to succeed. + + This function is useful as a sanity check in algorithms that make + extensive use of linkage and distance matrices that must + correspond to the same set of original observations. + + Parameters + ---------- + Z : array_like + The linkage matrix to check for correspondence. + Y : array_like + The condensed distance matrix to check for correspondence. + + Returns + ------- + b : bool + A boolean indicating whether the linkage matrix and distance + matrix could possibly correspond to one another. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, correspond + >>> from scipy.spatial.distance import pdist + + This method can be used to check if a given linkage matrix ``Z`` has been + obtained from the application of a cluster method to a dataset ``X``: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + >>> X_condensed = pdist(X) + >>> Z = ward(X_condensed) + + Here, we can compare ``Z`` and ``X`` (in condensed form): + + >>> correspond(Z, X_condensed) + True + + """ + is_valid_linkage(Z, throw=True) + distance.is_valid_y(Y, throw=True) + xp = array_namespace(Z, Y) + Z = _asarray(Z, order='c', xp=xp) + Y = _asarray(Y, order='c', xp=xp) + return distance.num_obs_y(Y) == num_obs_linkage(Z) + + +def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None): + """ + Form flat clusters from the hierarchical clustering defined by + the given linkage matrix. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded with the matrix returned + by the `linkage` function. + t : scalar + For criteria 'inconsistent', 'distance' or 'monocrit', + this is the threshold to apply when forming flat clusters. + For 'maxclust' or 'maxclust_monocrit' criteria, + this is the maximum number of clusters requested. + criterion : str, optional + The criterion to use in forming flat clusters. This can + be any of the following values: + + ``inconsistent`` : + If a cluster node and all its + descendants have an inconsistency value less than or equal + to `t`, then all its leaf descendants belong to the + same flat cluster. When no non-singleton cluster meets + this criterion, every node is assigned to its own + cluster. (Default) + + ``distance`` : + Forms flat clusters so that the original + observations in each flat cluster have no greater a + cophenetic distance than `t`.
+ + ``maxclust`` : + Finds a minimum threshold ``r`` so that + the cophenetic distance between any two original + observations in the same flat cluster is no more than + ``r`` and no more than `t` flat clusters are formed. + + ``monocrit`` : + Forms a flat cluster from a cluster node c + with index i when ``monocrit[i] <= t``. + + For example, to threshold on the maximum mean distance + as computed in the inconsistency matrix ``R`` with a + threshold of 0.8, do:: + + MR = maxRstat(Z, R, 3) + fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR) + + ``maxclust_monocrit`` : + Forms a flat cluster from a + non-singleton cluster node ``c`` when ``monocrit[i] <= + r`` for all cluster indices ``i`` below and including + ``c``. ``r`` is minimized such that no more than ``t`` + flat clusters are formed. ``monocrit`` must be + monotonic. For example, to minimize the threshold ``t`` on + maximum inconsistency values so that no more than 3 flat + clusters are formed, do:: + + MI = maxinconsts(Z, R) + fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI) + depth : int, optional + The maximum depth to perform the inconsistency calculation. + It has no meaning for the other criteria. Default is 2. + R : ndarray, optional + The inconsistency matrix to use for the ``'inconsistent'`` + criterion. This matrix is computed if not provided. + monocrit : ndarray, optional + An array of length n-1. `monocrit[i]` is the + statistic upon which non-singleton i is thresholded. The + monocrit vector must be monotonic, i.e., given a node c with + index i, for all node indices j corresponding to nodes + below c, ``monocrit[i] >= monocrit[j]``. + + Returns + ------- + fcluster : ndarray + An array of length ``n``. ``T[i]`` is the flat cluster number to + which original observation ``i`` belongs. + + See Also + -------- + linkage : for information about how hierarchical clustering methods work. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, fcluster + >>> from scipy.spatial.distance import pdist + + All cluster linkage methods - e.g., `scipy.cluster.hierarchy.ward` - + generate a linkage matrix ``Z`` as their output: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + + This matrix represents a dendrogram, where the first and second elements + are the two clusters merged at each step, the third element is the + distance between these clusters, and the fourth element is the size of + the new cluster - the number of original data points included. + + `scipy.cluster.hierarchy.fcluster` can be used to flatten the + dendrogram, obtaining as a result an assignment of the original data + points to single clusters.
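+ + For instance, the desired number of flat clusters can be requested directly (a brief usage sketch; with this dataset, asking for 4 clusters recovers the 4 corners of ``X``): + + >>> T = fcluster(Z, t=4, criterion='maxclust') + >>> sorted(set(T.tolist())) + [1, 2, 3, 4]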
+ + When the ``'distance'`` criterion is used, the assignment depends mostly on a distance + threshold ``t`` - the maximum + inter-cluster distance allowed: + + >>> fcluster(Z, t=0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + + >>> fcluster(Z, t=1.1, criterion='distance') + array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32) + + >>> fcluster(Z, t=3, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + + >>> fcluster(Z, t=9, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + In the first case, the threshold ``t`` is too small to allow any two + samples in the data to form a cluster, so 12 different clusters are + returned. + + In the second case, the threshold is large enough to allow the first + 4 points to be merged with their nearest neighbors. So, here, only 8 + clusters are returned. + + The third case, with a much higher threshold, allows each group of 3 + points in a corner to merge into a single cluster - so 4 clusters are + returned here. + + Lastly, the threshold of the fourth case is large enough to allow for + all data points to be merged together - so a single cluster is returned. + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + n = Z.shape[0] + 1 + T = np.zeros((n,), dtype='i') + + if monocrit is not None: + monocrit = np.asarray(monocrit, order='C', dtype=np.float64) + + Z = np.asarray(Z) + monocrit = np.asarray(monocrit) + if criterion == 'inconsistent': + if R is None: + R = inconsistent(Z, depth) + else: + R = _asarray(R, order='C', dtype=xp.float64, xp=xp) + is_valid_im(R, throw=True, name='R') + # Since the C code does not support striding using strides. + # The dimensions are used instead. + R = np.asarray(R) + _hierarchy.cluster_in(Z, R, T, float(t), int(n)) + elif criterion == 'distance': + _hierarchy.cluster_dist(Z, T, float(t), int(n)) + elif criterion == 'maxclust': + _hierarchy.cluster_maxclust_dist(Z, T, int(n), t) + elif criterion == 'monocrit': + _hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n)) + elif criterion == 'maxclust_monocrit': + _hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t)) + else: + raise ValueError('Invalid cluster formation criterion: %s' + % str(criterion)) + return xp.asarray(T) + + +def fclusterdata(X, t, criterion='inconsistent', + metric='euclidean', depth=2, method='single', R=None): + """ + Cluster observation data using a given metric. + + Clusters the original observations in the n-by-m data + matrix X (n observations in m dimensions), using the euclidean + distance metric to calculate distances between original observations, + performs hierarchical clustering using the single linkage algorithm, + and forms flat clusters using the inconsistency method with `t` as the + cut-off threshold. + + A 1-D array ``T`` of length ``n`` is returned. ``T[i]`` is + the index of the flat cluster to which the original observation ``i`` + belongs. + + Parameters + ---------- + X : (N, M) ndarray + N by M data matrix with N observations in M dimensions. + t : scalar + For criteria 'inconsistent', 'distance' or 'monocrit', + this is the threshold to apply when forming flat clusters. + For 'maxclust' or 'maxclust_monocrit' criteria, + this is the maximum number of clusters requested. + criterion : str, optional + Specifies the criterion for forming flat clusters. Valid + values are 'inconsistent' (default), 'distance', or 'maxclust' + cluster formation algorithms. See `fcluster` for descriptions.
+ metric : str or function, optional + The distance metric for calculating pairwise distances. See + ``distance.pdist`` for descriptions, and `linkage` to verify + compatibility with the linkage method. + depth : int, optional + The maximum depth for the inconsistency calculation. See + `inconsistent` for more information. + method : str, optional + The linkage method to use (single, complete, average, + weighted, median, centroid, ward). See `linkage` for more + information. Default is "single". + R : ndarray, optional + The inconsistency matrix. It will be computed + if it is not passed. + + Returns + ------- + fclusterdata : ndarray + A vector of length n. T[i] is the flat cluster number to + which original observation i belongs. + + See Also + -------- + scipy.spatial.distance.pdist : pairwise distance metrics + + Notes + ----- + This function is similar to the MATLAB function ``clusterdata``. + + Examples + -------- + >>> from scipy.cluster.hierarchy import fclusterdata + + This is a convenience method that abstracts all the steps to perform in a + typical SciPy hierarchical clustering workflow. + + * Transform the input data into a condensed matrix with + `scipy.spatial.distance.pdist`. + + * Apply a clustering method. + + * Obtain flat clusters at a user-defined distance threshold ``t`` using + `scipy.cluster.hierarchy.fcluster`. + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> fclusterdata(X, t=1) + array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32) + + The output here (for the dataset ``X``, distance threshold ``t``, and the + default settings) is four clusters with three data points each. + + """ + xp = array_namespace(X) + X = _asarray(X, order='C', dtype=xp.float64, xp=xp) + + if X.ndim != 2: + raise TypeError('The observation matrix X must be an n by m ' + 'array.') + + Y = distance.pdist(X, metric=metric) + Y = xp.asarray(Y) + Z = linkage(Y, method=method) + if R is None: + R = inconsistent(Z, d=depth) + else: + R = _asarray(R, order='c', xp=xp) + T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t) + return T + + +def leaves_list(Z): + """ + Return a list of leaf node ids. + + The return corresponds to the observation vector index as it appears + in the tree from left to right. Z is a linkage matrix. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded as a matrix. `Z` is + a linkage matrix. See `linkage` for more information. + + Returns + ------- + leaves_list : ndarray + The list of leaf node ids. + + See Also + -------- + dendrogram : for information about dendrogram structure. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, dendrogram, leaves_list + >>> from scipy.spatial.distance import pdist + >>> from matplotlib import pyplot as plt + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + + The linkage matrix ``Z`` represents a dendrogram, that is, a tree that + encodes the structure of the clustering performed.
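+ The same tree can also be materialized as a `ClusterNode` object with + `to_tree` (for instance, its root spans all 12 observations): + + >>> from scipy.cluster.hierarchy import to_tree + >>> to_tree(Z).get_count() + 12 +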
+ `scipy.cluster.hierarchy.leaves_list` shows the mapping between + indices in the ``X`` dataset and leaves in the dendrogram: + + >>> leaves_list(Z) + array([ 2, 0, 1, 5, 3, 4, 8, 6, 7, 11, 9, 10], dtype=int32) + + >>> fig = plt.figure(figsize=(25, 10)) + >>> dn = dendrogram(Z) + >>> plt.show() + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='C', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + n = Z.shape[0] + 1 + ML = np.zeros((n,), dtype='i') + Z = np.asarray(Z) + _hierarchy.prelist(Z, ML, n) + return xp.asarray(ML) + + +# Maps number of leaves to text size. + # + # p <= 20, size="12" + # 20 < p <= 30, size="10" + # 30 < p <= 50, size="8" + # 50 < p <= 85, size="6" + # 85 < p, size="5" + + _dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5} + _drotation = {20: 0, 40: 45, np.inf: 90} + _dtextsortedkeys = list(_dtextsizes.keys()) + _dtextsortedkeys.sort() + _drotationsortedkeys = list(_drotation.keys()) + _drotationsortedkeys.sort() + + + def _remove_dups(L): + """ + Remove duplicates AND preserve the original order of the elements. + + The set class is not guaranteed to do this. + """ + seen_before = set() + L2 = [] + for i in L: + if i not in seen_before: + seen_before.add(i) + L2.append(i) + return L2 + + + def _get_tick_text_size(p): + for k in _dtextsortedkeys: + if p <= k: + return _dtextsizes[k] + + + def _get_tick_rotation(p): + for k in _drotationsortedkeys: + if p <= k: + return _drotation[k] + + + def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation, + no_labels, color_list, leaf_font_size=None, + leaf_rotation=None, contraction_marks=None, + ax=None, above_threshold_color='C0'): + # Import matplotlib here so that it's not imported unless dendrograms + # are plotted. Raise an informative error if importing fails. + try: + # if an axis is provided, don't use pylab at all + if ax is None: + import matplotlib.pylab + import matplotlib.patches + import matplotlib.collections + except ImportError as e: + raise ImportError("You must install the matplotlib library to plot " + "the dendrogram. 
Use no_plot=True to calculate the " + "dendrogram without plotting.") from e + + if ax is None: + ax = matplotlib.pylab.gca() + # if we're using pylab, we want to trigger a draw at the end + trigger_redraw = True + else: + trigger_redraw = False + + # Independent variable plot width + ivw = len(ivl) * 10 + # Dependent variable plot height + dvw = mh + mh * 0.05 + + iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10) + if orientation in ('top', 'bottom'): + if orientation == 'top': + ax.set_ylim([0, dvw]) + ax.set_xlim([0, ivw]) + else: + ax.set_ylim([dvw, 0]) + ax.set_xlim([0, ivw]) + + xlines = icoords + ylines = dcoords + if no_labels: + ax.set_xticks([]) + ax.set_xticklabels([]) + else: + ax.set_xticks(iv_ticks) + + if orientation == 'top': + ax.xaxis.set_ticks_position('bottom') + else: + ax.xaxis.set_ticks_position('top') + + # Make the tick marks invisible because they cover up the links + for line in ax.get_xticklines(): + line.set_visible(False) + + leaf_rot = (float(_get_tick_rotation(len(ivl))) + if (leaf_rotation is None) else leaf_rotation) + leaf_font = (float(_get_tick_text_size(len(ivl))) + if (leaf_font_size is None) else leaf_font_size) + ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font) + + elif orientation in ('left', 'right'): + if orientation == 'left': + ax.set_xlim([dvw, 0]) + ax.set_ylim([0, ivw]) + else: + ax.set_xlim([0, dvw]) + ax.set_ylim([0, ivw]) + + xlines = dcoords + ylines = icoords + if no_labels: + ax.set_yticks([]) + ax.set_yticklabels([]) + else: + ax.set_yticks(iv_ticks) + + if orientation == 'left': + ax.yaxis.set_ticks_position('right') + else: + ax.yaxis.set_ticks_position('left') + + # Make the tick marks invisible because they cover up the links + for line in ax.get_yticklines(): + line.set_visible(False) + + leaf_font = (float(_get_tick_text_size(len(ivl))) + if (leaf_font_size is None) else leaf_font_size) + + if leaf_rotation is not None: + ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font) + else: + ax.set_yticklabels(ivl, size=leaf_font) + + # Let's use collections instead. This way there is a separate legend item + # for each tree grouping, rather than stupidly one for each line segment. + colors_used = _remove_dups(color_list) + color_to_lines = {} + for color in colors_used: + color_to_lines[color] = [] + for (xline, yline, color) in zip(xlines, ylines, color_list): + color_to_lines[color].append(list(zip(xline, yline))) + + colors_to_collections = {} + # Construct the collections. + for color in colors_used: + coll = matplotlib.collections.LineCollection(color_to_lines[color], + colors=(color,)) + colors_to_collections[color] = coll + + # Add all the groupings below the color threshold. + for color in colors_used: + if color != above_threshold_color: + ax.add_collection(colors_to_collections[color]) + # If there's a grouping of links above the color threshold, it goes last. 
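+ # (Adding it last also means it is drawn on top of the below-threshold + # collections.)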
+ if above_threshold_color in colors_to_collections: + ax.add_collection(colors_to_collections[above_threshold_color]) + + if contraction_marks is not None: + Ellipse = matplotlib.patches.Ellipse + for (x, y) in contraction_marks: + if orientation in ('left', 'right'): + e = Ellipse((y, x), width=dvw / 100, height=1.0) + else: + e = Ellipse((x, y), width=1.0, height=dvw / 100) + ax.add_artist(e) + e.set_clip_box(ax.bbox) + e.set_alpha(0.5) + e.set_facecolor('k') + + if trigger_redraw: + matplotlib.pylab.draw_if_interactive() + + +# C0 is used for above threshold color +_link_line_colors_default = ('C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9') +_link_line_colors = list(_link_line_colors_default) + + +def set_link_color_palette(palette): + """ + Set list of matplotlib color codes for use by dendrogram. + + Note that this palette is global (i.e., setting it once changes the colors + for all subsequent calls to `dendrogram`) and that it affects only the + colors below ``color_threshold``. + + Note that `dendrogram` also accepts a custom coloring function through its + ``link_color_func`` keyword, which is more flexible and non-global. + + Parameters + ---------- + palette : list of str or None + A list of matplotlib color codes. The order of the color codes is the + order in which the colors are cycled through when color thresholding in + the dendrogram. + + If ``None``, resets the palette to its default (the matplotlib + default colors C1 to C9). + + Returns + ------- + None + + See Also + -------- + dendrogram + + Notes + ----- + Ability to reset the palette with ``None`` added in SciPy 0.17.0. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster import hierarchy + >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., + ... 400., 754., 564., 138., 219., 869., 669.]) + >>> Z = hierarchy.linkage(ytdist, 'single') + >>> dn = hierarchy.dendrogram(Z, no_plot=True) + >>> dn['color_list'] + ['C1', 'C0', 'C0', 'C0', 'C0'] + >>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k']) + >>> dn = hierarchy.dendrogram(Z, no_plot=True, above_threshold_color='b') + >>> dn['color_list'] + ['c', 'b', 'b', 'b', 'b'] + >>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267, + ... above_threshold_color='k') + >>> dn['color_list'] + ['c', 'm', 'm', 'k', 'k'] + + Now, reset the color palette to its default: + + >>> hierarchy.set_link_color_palette(None) + + """ + if palette is None: + # reset to its default + palette = _link_line_colors_default + elif not isinstance(palette, (list, tuple)): + raise TypeError("palette must be a list or tuple") + _ptypes = [isinstance(p, str) for p in palette] + + if False in _ptypes: + raise TypeError("all palette list elements must be color strings") + + global _link_line_colors + _link_line_colors = palette + + +def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None, + get_leaves=True, orientation='top', labels=None, + count_sort=False, distance_sort=False, show_leaf_counts=True, + no_plot=False, no_labels=False, leaf_font_size=None, + leaf_rotation=None, leaf_label_func=None, + show_contracted=False, link_color_func=None, ax=None, + above_threshold_color='C0'): + """ + Plot the hierarchical clustering as a dendrogram. + + The dendrogram illustrates how each cluster is + composed by drawing a U-shaped link between a non-singleton + cluster and its children. The top of the U-link indicates a + cluster merge. The two legs of the U-link indicate which clusters + were merged.
The length of the two legs of the U-link represents + the distance between the child clusters. It is also the + cophenetic distance between original observations in the two + children clusters. + + Parameters + ---------- + Z : ndarray + The linkage matrix encoding the hierarchical clustering to + render as a dendrogram. See the ``linkage`` function for more + information on the format of ``Z``. + p : int, optional + The ``p`` parameter for ``truncate_mode``. + truncate_mode : str, optional + The dendrogram can be hard to read when the original + observation matrix from which the linkage is derived is + large. Truncation is used to condense the dendrogram. There + are several modes: + + ``None`` + No truncation is performed (default). + Note: ``'none'`` is an alias for ``None`` that's kept for + backward compatibility. + + ``'lastp'`` + The last ``p`` non-singleton clusters formed in the linkage are the + only non-leaf nodes in the linkage; they correspond to rows + ``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are + contracted into leaf nodes. + + ``'level'`` + No more than ``p`` levels of the dendrogram tree are displayed. + A "level" includes all nodes with ``p`` merges from the final merge. + + Note: ``'mtica'`` is an alias for ``'level'`` that's kept for + backward compatibility. + + color_threshold : double, optional + For brevity, let :math:`t` be the ``color_threshold``. + Colors all the descendent links below a cluster node + :math:`k` the same color if :math:`k` is the first node below + the cut threshold :math:`t`. All links connecting nodes with + distances greater than or equal to the threshold are colored + with the default matplotlib color ``'C0'``. If :math:`t` is less + than or equal to zero, all nodes are colored ``'C0'``. + If ``color_threshold`` is None or 'default', + corresponding to MATLAB(TM) behavior, the threshold is set to + ``0.7*max(Z[:,2])``. + + get_leaves : bool, optional + Includes a list ``R['leaves']=H`` in the result + dictionary. For each :math:`i`, ``H[i] == j``, cluster node + ``j`` appears in position ``i`` in the left-to-right traversal + of the leaves, where :math:`j < 2n-1` and :math:`i < n`. + orientation : str, optional + The direction to plot the dendrogram, which can be any + of the following strings: + + ``'top'`` + Plots the root at the top, and plots descendent links going downwards. + (default). + + ``'bottom'`` + Plots the root at the bottom, and plots descendent links going + upwards. + + ``'left'`` + Plots the root at the left, and plots descendent links going right. + + ``'right'`` + Plots the root at the right, and plots descendent links going left. + + labels : ndarray, optional + By default, ``labels`` is None so the index of the original observation + is used to label the leaf nodes. Otherwise, this is an :math:`n`-sized + sequence, with ``n == Z.shape[0] + 1``. The ``labels[i]`` value is the + text to put under the :math:`i` th leaf node only if it corresponds to + an original observation and not a non-singleton cluster. + count_sort : str or bool, optional + For each node n, the order (visually, from left to right) in which n's + two descendent links are plotted is determined by this + parameter, which can be any of the following values: + + ``False`` + Nothing is done. + + ``'ascending'`` or ``True`` + The child with the minimum number of original objects in its cluster + is plotted first. + + ``'descending'`` + The child with the maximum number of original objects in its cluster + is plotted first.
+ + Note, ``distance_sort`` and ``count_sort`` cannot both be True. + distance_sort : str or bool, optional + For each node n, the order (visually, from left to right) in which n's + two descendent links are plotted is determined by this + parameter, which can be any of the following values: + + ``False`` + Nothing is done. + + ``'ascending'`` or ``True`` + The child with the minimum distance between its direct descendents is + plotted first. + + ``'descending'`` + The child with the maximum distance between its direct descendents is + plotted first. + + Note ``distance_sort`` and ``count_sort`` cannot both be True. + show_leaf_counts : bool, optional + When True, leaf nodes representing :math:`k>1` original + observations are labeled with the number of observations they + contain in parentheses. + no_plot : bool, optional + When True, the final rendering is not performed. This is + useful if only the data structures computed for the rendering + are needed or if matplotlib is not available. + no_labels : bool, optional + When True, no labels appear next to the leaf nodes in the + rendering of the dendrogram. + leaf_rotation : double, optional + Specifies the angle (in degrees) to rotate the leaf + labels. When unspecified, the rotation is based on the number of + nodes in the dendrogram (default is 0). + leaf_font_size : int, optional + Specifies the font size (in points) of the leaf labels. When + unspecified, the size is based on the number of nodes in the + dendrogram. + leaf_label_func : lambda or function, optional + When ``leaf_label_func`` is a callable function, it is called for each + leaf with cluster index :math:`k < 2n-1`, and + is expected to return a string with the label for the + leaf. + + Indices :math:`k < n` correspond to original observations + while indices :math:`k \\geq n` correspond to non-singleton + clusters. + + For example, to label singletons with their node id and + non-singletons with their id, count, and inconsistency + coefficient, simply do:: + + # First define the leaf label function. + def llf(id): + if id < n: + return str(id) + else: + return '[%d %d %1.2f]' % (id, count, R[n-id,3]) + + # The text for the leaf nodes is going to be big so force + # a rotation of 90 degrees. + dendrogram(Z, leaf_label_func=llf, leaf_rotation=90) + + # leaf_label_func can also be used together with ``truncate_mode``, + # in which case you will get your leaves labeled after truncation: + dendrogram(Z, leaf_label_func=llf, leaf_rotation=90, + truncate_mode='level', p=2) + + show_contracted : bool, optional + When True, the heights of non-singleton nodes contracted + into a leaf node are plotted as crosses along the link + connecting that leaf node. This really is only useful when + truncation is used (see ``truncate_mode`` parameter). + link_color_func : callable, optional + If given, `link_color_func` is called with each non-singleton id + corresponding to each U-shaped link it will paint. The function is + expected to return the color to paint the link, encoded as a matplotlib + color string code. For example:: + + dendrogram(Z, link_color_func=lambda k: colors[k]) + + colors the direct links below each untruncated non-singleton node + ``k`` using ``colors[k]``. + ax : matplotlib Axes instance, optional + If None and `no_plot` is not True, the dendrogram will be plotted + on the current axes. Otherwise if `no_plot` is not True the + dendrogram will be plotted on the given ``Axes`` instance. This can be + useful if the dendrogram is part of a more complex figure.
+ above_threshold_color : str, optional + This matplotlib color string sets the color of the links above the + color_threshold. The default is ``'C0'``. + + Returns + ------- + R : dict + A dictionary of data structures computed to render the + dendrogram. It has the following keys: + + ``'color_list'`` + A list of color names. The k'th element represents the color of the + k'th link. + + ``'icoord'`` and ``'dcoord'`` + Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]`` + where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]`` + where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is + ``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``. + + ``'ivl'`` + A list of labels corresponding to the leaf nodes. + + ``'leaves'`` + For each i, ``H[i] == j``, cluster node ``j`` appears in position + ``i`` in the left-to-right traversal of the leaves, where + :math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the + ``i``-th leaf node corresponds to an original observation. + Otherwise, it corresponds to a non-singleton cluster. + + ``'leaves_color_list'`` + A list of color names. The k'th element represents the color of the + k'th leaf. + + See Also + -------- + linkage, set_link_color_palette + + Notes + ----- + It is expected that the distances in ``Z[:,2]`` be monotonic, otherwise + crossings appear in the dendrogram. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster import hierarchy + >>> import matplotlib.pyplot as plt + + A very basic example: + + >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., + ... 400., 754., 564., 138., 219., 869., 669.]) + >>> Z = hierarchy.linkage(ytdist, 'single') + >>> plt.figure() + >>> dn = hierarchy.dendrogram(Z) + + Now, plot in given axes, improve the color scheme and use both vertical and + horizontal orientations: + + >>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k']) + >>> fig, axes = plt.subplots(1, 2, figsize=(8, 3)) + >>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y', + ... orientation='top') + >>> dn2 = hierarchy.dendrogram(Z, ax=axes[1], + ... above_threshold_color='#bcbddc', + ... orientation='right') + >>> hierarchy.set_link_color_palette(None) # reset to default after use + >>> plt.show() + + """ + # This feature was thought about but never implemented (still useful?): + # + # ... = dendrogram(..., leaves_order=None) + # + # Plots the leaves in the order specified by a vector of + # original observation indices. If the vector contains duplicates + # or results in a crossing, an exception will be thrown. Passing + # None orders leaf nodes based on the order they appear in the + # pre-order traversal. + xp = array_namespace(Z) + Z = _asarray(Z, order='c', xp=xp) + + if orientation not in ["top", "left", "bottom", "right"]: + raise ValueError("orientation must be one of 'top', 'left', " + "'bottom', or 'right'") + + if labels is not None: + try: + len_labels = len(labels) + except (TypeError, AttributeError): + len_labels = labels.shape[0] + if Z.shape[0] + 1 != len_labels: + raise ValueError("Dimensions of Z and labels must be consistent.") + + is_valid_linkage(Z, throw=True, name='Z') + Zs = Z.shape + n = Zs[0] + 1 + if isinstance(p, (int, float)): + p = int(p) + else: + raise TypeError('The second argument must be a number') + + if truncate_mode not in ('lastp', 'mtica', 'level', 'none', None): + # 'mtica' is kept working for backwards compat.
+ raise ValueError('Invalid truncation mode.') + + if truncate_mode == 'lastp': + if p > n or p == 0: + p = n + + if truncate_mode == 'mtica': + # 'mtica' is an alias + truncate_mode = 'level' + + if truncate_mode == 'level': + if p <= 0: + p = np.inf + + if get_leaves: + lvs = [] + else: + lvs = None + + icoord_list = [] + dcoord_list = [] + color_list = [] + current_color = [0] + currently_below_threshold = [False] + ivl = [] # list of leaves + + if color_threshold is None or (isinstance(color_threshold, str) and + color_threshold == 'default'): + color_threshold = max(Z[:, 2]) * 0.7 + + R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl, + 'leaves': lvs, 'color_list': color_list} + + # Empty list will be filled in _dendrogram_calculate_info + contraction_marks = [] if show_contracted else None + + _dendrogram_calculate_info( + Z=Z, p=p, + truncate_mode=truncate_mode, + color_threshold=color_threshold, + get_leaves=get_leaves, + orientation=orientation, + labels=labels, + count_sort=count_sort, + distance_sort=distance_sort, + show_leaf_counts=show_leaf_counts, + i=2*n - 2, + iv=0.0, + ivl=ivl, + n=n, + icoord_list=icoord_list, + dcoord_list=dcoord_list, + lvs=lvs, + current_color=current_color, + color_list=color_list, + currently_below_threshold=currently_below_threshold, + leaf_label_func=leaf_label_func, + contraction_marks=contraction_marks, + link_color_func=link_color_func, + above_threshold_color=above_threshold_color) + + if not no_plot: + mh = max(Z[:, 2]) + _plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation, + no_labels, color_list, + leaf_font_size=leaf_font_size, + leaf_rotation=leaf_rotation, + contraction_marks=contraction_marks, + ax=ax, + above_threshold_color=above_threshold_color) + + R["leaves_color_list"] = _get_leaves_color_list(R) + + return R + + +def _get_leaves_color_list(R): + leaves_color_list = [None] * len(R['leaves']) + for link_x, link_y, link_color in zip(R['icoord'], + R['dcoord'], + R['color_list']): + for (xi, yi) in zip(link_x, link_y): + if yi == 0.0 and (xi % 5 == 0 and xi % 2 == 1): + # if yi is 0.0 and xi is divisible by 5 and odd, + # the point is a leaf + # xi of leaves are 5, 15, 25, 35, ... (see `iv_ticks`) + # index of leaves are 0, 1, 2, 3, ... as below + leaf_index = (int(xi) - 5) // 10 + # each leaf has the same color as its link. + leaves_color_list[leaf_index] = link_color + return leaves_color_list + + +def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, + i, labels): + # If the leaf id structure is not None and is a list then the caller + # to dendrogram has indicated that cluster id's corresponding to the + # leaf nodes should be recorded. + + if lvs is not None: + lvs.append(int(i)) + + # If leaf node labels are to be displayed... + if ivl is not None: + # If a leaf_label_func has been provided, the label comes from the + # string returned from the leaf_label_func, which is a function + # passed to dendrogram. + if leaf_label_func: + ivl.append(leaf_label_func(int(i))) + else: + # Otherwise, if the dendrogram caller has passed a labels list + # for the leaf nodes, use it.
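+ # (In this branch i < n, so i - n is negative: for an n-length labels + # sequence, labels[i - n] selects the same element as labels[i] + # through Python's negative indexing.)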
+ if labels is not None: + ivl.append(labels[int(i - n)]) + else: + # Otherwise, use the id as the label for the leaf. + ivl.append(str(int(i))) + + +def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, + i, labels, show_leaf_counts): + # If the leaf id structure is not None and is a list then the caller + # to dendrogram has indicated that cluster id's corresponding to the + # leaf nodes should be recorded. + + if lvs is not None: + lvs.append(int(i)) + if ivl is not None: + if leaf_label_func: + ivl.append(leaf_label_func(int(i))) + else: + if show_leaf_counts: + ivl.append("(" + str(np.asarray(Z[i - n, 3], dtype=np.int64)) + ")") + else: + ivl.append("") + + +def _append_contraction_marks(Z, iv, i, n, contraction_marks, xp): + _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 0], xp), + n, contraction_marks, xp) + _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 1], xp), + n, contraction_marks, xp) + + +def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks, xp): + if i >= n: + contraction_marks.append((iv, Z[i - n, 2])) + _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 0], xp), + n, contraction_marks, xp) + _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 1], xp), + n, contraction_marks, xp) + + +def _dendrogram_calculate_info(Z, p, truncate_mode, + color_threshold=np.inf, get_leaves=True, + orientation='top', labels=None, + count_sort=False, distance_sort=False, + show_leaf_counts=False, i=-1, iv=0.0, + ivl=[], n=0, icoord_list=[], dcoord_list=[], + lvs=None, mhr=False, + current_color=[], color_list=[], + currently_below_threshold=[], + leaf_label_func=None, level=0, + contraction_marks=None, + link_color_func=None, + above_threshold_color='C0'): + """ + Calculate the endpoints of the links as well as the labels for the + dendrogram rooted at the node with index i. iv is the independent + variable value to plot the left-most leaf node below the root node i + (if orientation='top', this would be the left-most x value where the + plotting of this root node i and its descendents should begin). + + ivl is a list to store the labels of the leaf nodes. The leaf_label_func + is called whenever ivl != None, labels == None, and + leaf_label_func != None. When ivl != None and labels != None, the + labels list is used only for labeling the leaf nodes. When + ivl == None, no labels are generated for leaf nodes. + + When get_leaves==True, a list of leaves is built as they are visited + in the dendrogram. + + Returns a tuple with l being the independent variable coordinate that + corresponds to the midpoint of the cluster to the left of cluster i if + i is non-singleton, otherwise the independent coordinate of the leaf + node if i is a leaf node. + + Returns + ------- + A tuple (left, w, h, md), where: + * left is the independent variable coordinate of the center of the + the U of the subtree + + * w is the amount of space used for the subtree (in independent + variable units) + + * h is the height of the subtree in dependent variable units + + * md is the ``max(Z[*,2])`` for all nodes ``*`` below and including + the target node. + + """ + xp = array_namespace(Z) + if n == 0: + raise ValueError("Invalid singleton cluster count n.") + + if i == -1: + raise ValueError("Invalid root cluster index i.") + + if truncate_mode == 'lastp': + # If the node is a leaf node but corresponds to a non-singleton + # cluster, its label is either the empty string or the number of + # original observations belonging to cluster i.
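+ # (Node ids n..2n-2 denote non-singleton clusters, and row i - n of Z + # describes the merge that created node i; under 'lastp', a non-singleton + # node with id below 2n - p is contracted into a single leaf.)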
+ if 2*n - p > i >= n: + d = Z[i - n, 2] + _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels, + show_leaf_counts) + if contraction_marks is not None: + _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks, xp) + return (iv + 5.0, 10.0, 0.0, d) + elif i < n: + _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels) + return (iv + 5.0, 10.0, 0.0, 0.0) + elif truncate_mode == 'level': + if i > n and level > p: + d = Z[i - n, 2] + _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels, + show_leaf_counts) + if contraction_marks is not None: + _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks, xp) + return (iv + 5.0, 10.0, 0.0, d) + elif i < n: + _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels) + return (iv + 5.0, 10.0, 0.0, 0.0) + + # Otherwise, only truncate if we have a leaf node. + # + # Only place leaves if they correspond to original observations. + if i < n: + _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels) + return (iv + 5.0, 10.0, 0.0, 0.0) + + # !!! Otherwise, we don't have a leaf node, so work on plotting a + # non-leaf node. + # Actual indices of a and b + aa = int_floor(Z[i - n, 0], xp) + ab = int_floor(Z[i - n, 1], xp) + if aa >= n: + # The number of singletons below cluster a + na = Z[aa - n, 3] + # The distance between a's two direct children. + da = Z[aa - n, 2] + else: + na = 1 + da = 0.0 + if ab >= n: + nb = Z[ab - n, 3] + db = Z[ab - n, 2] + else: + nb = 1 + db = 0.0 + + if count_sort == 'ascending' or count_sort is True: + # If a has a count greater than b, it and its descendents should + # be drawn to the right. Otherwise, to the left. + if na > nb: + # The cluster index to draw to the left (ua) will be ab + # and the one to draw to the right (ub) will be aa + ua = ab + ub = aa + else: + ua = aa + ub = ab + elif count_sort == 'descending': + # If a has a count less than or equal to b, it and its + # descendents should be drawn to the left. Otherwise, to + # the right. + if na > nb: + ua = aa + ub = ab + else: + ua = ab + ub = aa + elif distance_sort == 'ascending' or distance_sort is True: + # If a has a distance greater than b, it and its descendents should + # be drawn to the right. Otherwise, to the left. + if da > db: + ua = ab + ub = aa + else: + ua = aa + ub = ab + elif distance_sort == 'descending': + # If a has a distance less than or equal to b, it and its + # descendents should be drawn to the left. Otherwise, to + # the right. + if da > db: + ua = aa + ub = ab + else: + ua = ab + ub = aa + else: + ua = aa + ub = ab + + # Updated iv variable and the amount of space used. 
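+ # (Each recursive call returns, for the subtree it plots: the x midpoint + # of its U-link, the horizontal space it used, its link height, and the + # maximum distance anywhere below it - the (left, w, h, md) tuple + # described in the docstring.)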
+ (uiva, uwa, uah, uamd) = \ + _dendrogram_calculate_info( + Z=Z, p=p, + truncate_mode=truncate_mode, + color_threshold=color_threshold, + get_leaves=get_leaves, + orientation=orientation, + labels=labels, + count_sort=count_sort, + distance_sort=distance_sort, + show_leaf_counts=show_leaf_counts, + i=ua, iv=iv, ivl=ivl, n=n, + icoord_list=icoord_list, + dcoord_list=dcoord_list, lvs=lvs, + current_color=current_color, + color_list=color_list, + currently_below_threshold=currently_below_threshold, + leaf_label_func=leaf_label_func, + level=level + 1, contraction_marks=contraction_marks, + link_color_func=link_color_func, + above_threshold_color=above_threshold_color) + + h = Z[i - n, 2] + if h >= color_threshold or color_threshold <= 0: + c = above_threshold_color + + if currently_below_threshold[0]: + current_color[0] = (current_color[0] + 1) % len(_link_line_colors) + currently_below_threshold[0] = False + else: + currently_below_threshold[0] = True + c = _link_line_colors[current_color[0]] + + (uivb, uwb, ubh, ubmd) = \ + _dendrogram_calculate_info( + Z=Z, p=p, + truncate_mode=truncate_mode, + color_threshold=color_threshold, + get_leaves=get_leaves, + orientation=orientation, + labels=labels, + count_sort=count_sort, + distance_sort=distance_sort, + show_leaf_counts=show_leaf_counts, + i=ub, iv=iv + uwa, ivl=ivl, n=n, + icoord_list=icoord_list, + dcoord_list=dcoord_list, lvs=lvs, + current_color=current_color, + color_list=color_list, + currently_below_threshold=currently_below_threshold, + leaf_label_func=leaf_label_func, + level=level + 1, contraction_marks=contraction_marks, + link_color_func=link_color_func, + above_threshold_color=above_threshold_color) + + max_dist = max(uamd, ubmd, h) + + icoord_list.append([uiva, uiva, uivb, uivb]) + dcoord_list.append([uah, h, h, ubh]) + if link_color_func is not None: + v = link_color_func(int(i)) + if not isinstance(v, str): + raise TypeError("link_color_func must return a matplotlib " + "color string!") + color_list.append(v) + else: + color_list.append(c) + + return (((uiva + uivb) / 2), uwa + uwb, h, max_dist) + + +def is_isomorphic(T1, T2): + """ + Determine if two different cluster assignments are equivalent. + + Parameters + ---------- + T1 : array_like + An assignment of singleton cluster ids to flat cluster ids. + T2 : array_like + An assignment of singleton cluster ids to flat cluster ids. + + Returns + ------- + b : bool + Whether the flat cluster assignments `T1` and `T2` are + equivalent. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + fcluster : for the creation of flat cluster assignments. + + Examples + -------- + >>> from scipy.cluster.hierarchy import fcluster, is_isomorphic + >>> from scipy.cluster.hierarchy import single, complete + >>> from scipy.spatial.distance import pdist + + Two flat cluster assignments can be isomorphic if they represent the same + cluster assignment, with different labels. + + For example, we can use the `scipy.cluster.hierarchy.single` method + and flatten the output to four clusters: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ...
+
+    >>> Z = single(pdist(X))
+    >>> T = fcluster(Z, 1, criterion='distance')
+    >>> T
+    array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
+
+    We can then do the same using the
+    `scipy.cluster.hierarchy.complete` method:
+
+    >>> Z = complete(pdist(X))
+    >>> T_ = fcluster(Z, 1.5, criterion='distance')
+    >>> T_
+    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
+
+    As we can see, in both cases we obtain four clusters and all the data
+    points are distributed in the same way - the only thing that changes
+    is the flat cluster labels (3 => 1, 4 => 2, 2 => 3 and 1 => 4), so both
+    cluster assignments are isomorphic:
+
+    >>> is_isomorphic(T, T_)
+    True
+
+    """
+    T1 = np.asarray(T1, order='c')
+    T2 = np.asarray(T2, order='c')
+
+    T1S = T1.shape
+    T2S = T2.shape
+
+    if len(T1S) != 1:
+        raise ValueError('T1 must be one-dimensional.')
+    if len(T2S) != 1:
+        raise ValueError('T2 must be one-dimensional.')
+    if T1S[0] != T2S[0]:
+        raise ValueError('T1 and T2 must have the same number of elements.')
+    n = T1S[0]
+    d1 = {}
+    d2 = {}
+    for i in range(0, n):
+        if T1[i] in d1:
+            if T2[i] not in d2:
+                return False
+            if d1[T1[i]] != T2[i] or d2[T2[i]] != T1[i]:
+                return False
+        elif T2[i] in d2:
+            return False
+        else:
+            d1[T1[i]] = T2[i]
+            d2[T2[i]] = T1[i]
+    return True
+
+
+def maxdists(Z):
+    """
+    Return the maximum distance between any non-singleton cluster.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. See
+        ``linkage`` for more information.
+
+    Returns
+    -------
+    maxdists : ndarray
+        A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
+        the maximum distance between any cluster (including
+        singletons) below and including the node with index i. More
+        specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
+        set of all node indices below and including node i.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    is_monotonic : for testing for monotonicity of a linkage matrix.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import median, maxdists
+    >>> from scipy.spatial.distance import pdist
+
+    Given a linkage matrix ``Z``, `scipy.cluster.hierarchy.maxdists`
+    computes, for each new cluster generated (i.e., for each row of the
+    linkage matrix), the maximum distance between any two child clusters.
+
+    Due to the nature of hierarchical clustering, in many cases this is going
+    to be just the distance between the two child clusters that were merged
+    to form the current one - that is, Z[:,2].
+
+    However, for non-monotonic cluster assignments such as
+    `scipy.cluster.hierarchy.median` clustering this is not always the
+    case: there may be cluster formations where the distance between the two
+    clusters merged is smaller than the distance between their children.
+
+    We can see this in an example:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = median(pdist(X))
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.11803399,  3.        ],
+           [ 5.        , 13.        ,  1.11803399,  3.        ],
+           [ 8.        , 15.        ,  1.11803399,  3.        ],
+           [11.        , 14.        ,  1.11803399,  3.        ],
+           [18.        , 19.        ,  3.        ,  6.        ],
+           [16.        , 17.        ,  3.5       ,  6.        ],
+           [20.        , 21.        ,  3.25      , 12.        ]])
+    >>> maxdists(Z)
+    array([1.        , 1.        , 1.        , 1.        , 1.11803399,
+           1.11803399, 1.11803399, 1.11803399, 3.        , 3.5       ,
+           3.5       ])
+
+    Note that while the distance between the two clusters merged when creating the
+    last cluster is 3.25, there are two children (clusters 16 and 17) whose distance
+    is larger (3.5). Thus, `scipy.cluster.hierarchy.maxdists` returns 3.5 in
+    this case.
+
+    """
+    xp = array_namespace(Z)
+    Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp)
+    is_valid_linkage(Z, throw=True, name='Z')
+
+    n = Z.shape[0] + 1
+    MD = np.zeros((n - 1,))
+    Z = np.asarray(Z)
+    _hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
+    MD = xp.asarray(MD)
+    return MD
+
+
+def maxinconsts(Z, R):
+    """
+    Return the maximum inconsistency coefficient for each
+    non-singleton cluster and its children.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. See
+        `linkage` for more information.
+    R : ndarray
+        The inconsistency matrix.
+
+    Returns
+    -------
+    MI : ndarray
+        A monotonic ``(n-1)``-sized numpy array of doubles.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    inconsistent : for the creation of an inconsistency matrix.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import median, inconsistent, maxinconsts
+    >>> from scipy.spatial.distance import pdist
+
+    Given a data set ``X``, we can apply a clustering method to obtain a
+    linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
+    also be used to obtain the inconsistency matrix ``R`` associated
+    with this clustering process:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = median(pdist(X))
+    >>> R = inconsistent(Z)
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.11803399,  3.        ],
+           [ 5.        , 13.        ,  1.11803399,  3.        ],
+           [ 8.        , 15.        ,  1.11803399,  3.        ],
+           [11.        , 14.        ,  1.11803399,  3.        ],
+           [18.        , 19.        ,  3.        ,  6.        ],
+           [16.        , 17.        ,  3.5       ,  6.        ],
+           [20.        , 21.        ,  3.25      , 12.        ]])
+    >>> R
+    array([[1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.74535599, 1.08655358, 3.        , 1.15470054],
+           [1.91202266, 1.37522872, 3.        , 1.15470054],
+           [3.25      , 0.25      , 3.        , 0.        ]])
+
+    Here, `scipy.cluster.hierarchy.maxinconsts` can be used to compute
+    the maximum value of the inconsistency statistic (the last column of
+    ``R``) for each non-singleton cluster and its children:
+
+    >>> maxinconsts(Z, R)
+    array([0.        , 0.        , 0.        , 0.        , 0.70710678,
+           0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,
+           1.15470054])
+
+    """
+    xp = array_namespace(Z, R)
+    Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp)
+    R = _asarray(R, order='C', dtype=xp.float64, xp=xp)
+    is_valid_linkage(Z, throw=True, name='Z')
+    is_valid_im(R, throw=True, name='R')
+
+    n = Z.shape[0] + 1
+    if Z.shape[0] != R.shape[0]:
+        raise ValueError("The inconsistency matrix and linkage matrix each "
+                         "have a different number of rows.")
+    MI = np.zeros((n - 1,))
+    Z = np.asarray(Z)
+    R = np.asarray(R)
+    _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
+    MI = xp.asarray(MI)
+    return MI
+
+
+def maxRstat(Z, R, i):
+    """
+    Return the maximum statistic for each non-singleton cluster and its
+    children.
+
+    Parameters
+    ----------
+    Z : array_like
+        The hierarchical clustering encoded as a matrix. See `linkage` for more
+        information.
+    R : array_like
+        The inconsistency matrix.
+    i : int
+        The column of `R` to use as the statistic.
+
+    Returns
+    -------
+    MR : ndarray
+        The maximum statistic for the i'th column of the
+        inconsistency matrix `R` for each non-singleton cluster
+        node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]``, where
+        ``Q(j)`` is the set of all node ids corresponding to nodes below
+        and including ``j``.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    inconsistent : for the creation of an inconsistency matrix.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import median, inconsistent, maxRstat
+    >>> from scipy.spatial.distance import pdist
+
+    Given a data set ``X``, we can apply a clustering method to obtain a
+    linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
+    also be used to obtain the inconsistency matrix ``R`` associated
+    with this clustering process:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = median(pdist(X))
+    >>> R = inconsistent(Z)
+    >>> R
+    array([[1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.74535599, 1.08655358, 3.        , 1.15470054],
+           [1.91202266, 1.37522872, 3.        , 1.15470054],
+           [3.25      , 0.25      , 3.        , 0.        ]])
+
+    `scipy.cluster.hierarchy.maxRstat` can be used to compute
+    the maximum value of each column of ``R``, for each non-singleton
+    cluster and its children:
+
+    >>> maxRstat(Z, R, 0)
+    array([1.        , 1.        , 1.        , 1.        , 1.05901699,
+           1.05901699, 1.05901699, 1.05901699, 1.74535599, 1.91202266,
+           3.25      ])
+    >>> maxRstat(Z, R, 1)
+    array([0.        , 0.        , 0.        , 0.        , 0.08346263,
+           0.08346263, 0.08346263, 0.08346263, 1.08655358, 1.37522872,
+           1.37522872])
+    >>> maxRstat(Z, R, 3)
+    array([0.        , 0.        , 0.        , 0.        , 0.70710678,
+           0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,
+           1.15470054])
+
+    """
+    xp = array_namespace(Z, R)
+    Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp)
+    R = _asarray(R, order='C', dtype=xp.float64, xp=xp)
+    is_valid_linkage(Z, throw=True, name='Z')
+    is_valid_im(R, throw=True, name='R')
+
+    if not isinstance(i, int):
+        raise TypeError('The third argument must be an integer.')
+
+    if i < 0 or i > 3:
+        raise ValueError('i must be an integer between 0 and 3 inclusive.')
+
+    if Z.shape[0] != R.shape[0]:
+        raise ValueError("The inconsistency matrix and linkage matrix each "
+                         "have a different number of rows.")
+
+    n = Z.shape[0] + 1
+    MR = np.zeros((n - 1,))
+    Z = np.asarray(Z)
+    R = np.asarray(R)
+    _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
+    MR = xp.asarray(MR)
+    return MR
+
+
+def leaders(Z, T):
+    """
+    Return the root nodes in a hierarchical clustering.
+
+    Returns the root nodes in a hierarchical clustering corresponding
+    to a cut defined by a flat cluster assignment vector ``T``. See
+    the ``fcluster`` function for more information on the format of ``T``.
+
+    For each flat cluster :math:`j` of the :math:`k` flat clusters
+    represented in the n-sized flat cluster assignment vector ``T``,
+    this function finds the lowest cluster node :math:`i` in the linkage
+    tree Z, such that:
+
+      * leaf descendants belong only to flat cluster j
+        (i.e., ``T[p]==j`` for all :math:`p` in :math:`S(i)`, where
+        :math:`S(i)` is the set of leaf ids of leaf nodes descended
+        from cluster node :math:`i`)
+
+      * there does not exist a leaf that is not a descendant of
+        :math:`i` that also belongs to cluster :math:`j`
+        (i.e., ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
+        this condition is violated, ``T`` is not a valid cluster
+        assignment vector, and an exception will be thrown.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. See
+        `linkage` for more information.
+    T : ndarray
+        The flat cluster assignment vector.
+
+    Returns
+    -------
+    L : ndarray
+        The leader linkage node ids stored as a k-element 1-D array,
+        where ``k`` is the number of flat clusters found in ``T``.
+
+        ``L[j]=i`` is the linkage cluster node id that is the
+        leader of flat cluster with id M[j]. If ``i < n``, ``i``
+        corresponds to an original observation, otherwise it
+        corresponds to a non-singleton cluster.
+    M : ndarray
+        The flat cluster ids stored as a k-element 1-D array, where
+        ``k`` is the number of flat clusters found in ``T``. This allows the
+        set of flat cluster ids to be any arbitrary set of ``k`` integers.
+
+        For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
+        id 8's leader is linkage node 2.
+
+    See Also
+    --------
+    fcluster : for the creation of flat cluster assignments.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, fcluster, leaders
+    >>> from scipy.spatial.distance import pdist
+
+    Given a linkage matrix ``Z`` - obtained after applying a clustering method
+    to a dataset ``X`` - and a flat cluster assignment array ``T``:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.29099445,  3.        ],
+           [ 5.        , 13.        ,  1.29099445,  3.        ],
+           [ 8.        , 14.        ,  1.29099445,  3.        ],
+           [11.        , 15.        ,  1.29099445,  3.        ],
+           [16.        , 17.        ,  5.77350269,  6.        ],
+           [18.        , 19.        ,  5.77350269,  6.        ],
+           [20.        , 21.        ,  8.16496581, 12.
]]) + + >>> T = fcluster(Z, 3, criterion='distance') + >>> T + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + + `scipy.cluster.hierarchy.leaders` returns the indices of the nodes + in the dendrogram that are the leaders of each flat cluster: + + >>> L, M = leaders(Z, T) + >>> L + array([16, 17, 18, 19], dtype=int32) + + (remember that indices 0-11 point to the 12 data points in ``X``, + whereas indices 12-22 point to the 11 rows of ``Z``) + + `scipy.cluster.hierarchy.leaders` also returns the indices of + the flat clusters in ``T``: + + >>> M + array([1, 2, 3, 4], dtype=int32) + + """ + xp = array_namespace(Z, T) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + T = _asarray(T, order='C', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + if T.dtype != xp.int32: + raise TypeError('T must be a 1-D array of dtype int32.') + + if T.shape[0] != Z.shape[0] + 1: + raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.') + + n_clusters = int(xp.unique_values(T).shape[0]) + n_obs = int(Z.shape[0] + 1) + L = np.zeros(n_clusters, dtype=np.int32) + M = np.zeros(n_clusters, dtype=np.int32) + Z = np.asarray(Z) + T = np.asarray(T, dtype=np.int32) + s = _hierarchy.leaders(Z, T, L, M, n_clusters, n_obs) + if s >= 0: + raise ValueError(('T is not a valid assignment vector. Error found ' + 'when examining linkage node %d (< 2n-1).') % s) + L, M = xp.asarray(L), xp.asarray(M) + return (L, M) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0d98f58aa2eef7d010ad9c44c780919a1ea8fa5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6c7c51142a7e8094215a7c8d0f2bc934d4c8e07 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..552c2510a1aed2236e4d89da41d98f92cee11817 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6a8c24c0d1cc5f2fb5aee2fb84e910b8366b1d6 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..feb9b2d3a8c999bd8519acfbd72d32aa52d9f12c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py new file mode 100644 index 0000000000000000000000000000000000000000..7d874ca5eb7141a44559307d1c28dd412171396f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py @@ -0,0 +1,145 @@ +from numpy import array + + +Q_X = array([[5.26563660e-01, 3.14160190e-01, 8.00656370e-02], + [7.50205180e-01, 4.60299830e-01, 8.98696460e-01], + [6.65461230e-01, 6.94011420e-01, 9.10465700e-01], + [9.64047590e-01, 1.43082200e-03, 7.39874220e-01], + [1.08159060e-01, 5.53028790e-01, 6.63804780e-02], + [9.31359130e-01, 8.25424910e-01, 9.52315440e-01], + [6.78086960e-01, 3.41903970e-01, 5.61481950e-01], + [9.82730940e-01, 7.04605210e-01, 8.70978630e-02], + [6.14691610e-01, 4.69989230e-02, 6.02406450e-01], + [5.80161260e-01, 9.17354970e-01, 5.88163850e-01], + [1.38246310e+00, 1.96358160e+00, 1.94437880e+00], + [2.10675860e+00, 1.67148730e+00, 1.34854480e+00], + [1.39880070e+00, 1.66142050e+00, 1.32224550e+00], + [1.71410460e+00, 1.49176380e+00, 1.45432170e+00], + [1.54102340e+00, 1.84374950e+00, 1.64658950e+00], + [2.08512480e+00, 1.84524350e+00, 2.17340850e+00], + [1.30748740e+00, 1.53801650e+00, 2.16007740e+00], + [1.41447700e+00, 1.99329070e+00, 1.99107420e+00], + [1.61943490e+00, 1.47703280e+00, 1.89788160e+00], + [1.59880600e+00, 1.54988980e+00, 1.57563350e+00], + [3.37247380e+00, 2.69635310e+00, 3.39981700e+00], + [3.13705120e+00, 3.36528090e+00, 3.06089070e+00], + [3.29413250e+00, 3.19619500e+00, 2.90700170e+00], + [2.65510510e+00, 3.06785900e+00, 2.97198540e+00], + [3.30941040e+00, 2.59283970e+00, 2.57714110e+00], + [2.59557220e+00, 3.33477370e+00, 3.08793190e+00], + [2.58206180e+00, 3.41615670e+00, 3.26441990e+00], + [2.71127000e+00, 2.77032450e+00, 2.63466500e+00], + [2.79617850e+00, 3.25473720e+00, 3.41801560e+00], + [2.64741750e+00, 2.54538040e+00, 3.25354110e+00]]) + +ytdist = array([662., 877., 255., 412., 996., 295., 468., 268., 400., 754., + 564., 138., 219., 869., 669.]) + +linkage_ytdist_single = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 255., 3.], + [1., 8., 268., 4.], + [6., 9., 295., 6.]]) + +linkage_ytdist_complete = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [1., 6., 400., 3.], + [0., 7., 412., 3.], + [8., 9., 996., 6.]]) + +linkage_ytdist_average = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 333.5, 3.], + [1., 6., 347.5, 3.], + [8., 9., 680.77777778, 6.]]) + +linkage_ytdist_weighted = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 333.5, 3.], + [1., 6., 347.5, 3.], + [8., 9., 670.125, 6.]]) + +# the optimal leaf ordering of linkage_ytdist_single +linkage_ytdist_single_olo = array([[5., 2., 138., 2.], + [4., 3., 219., 2.], + [7., 0., 255., 3.], + [1., 8., 268., 4.], + [6., 9., 295., 6.]]) + +X = array([[1.43054825, -7.5693489], + [6.95887839, 6.82293382], 
+ [2.87137846, -9.68248579], + [7.87974764, -6.05485803], + [8.24018364, -6.09495602], + [7.39020262, 8.54004355]]) + +linkage_X_centroid = array([[3., 4., 0.36265956, 2.], + [1., 5., 1.77045373, 2.], + [0., 2., 2.55760419, 2.], + [6., 8., 6.43614494, 4.], + [7., 9., 15.17363237, 6.]]) + +linkage_X_median = array([[3., 4., 0.36265956, 2.], + [1., 5., 1.77045373, 2.], + [0., 2., 2.55760419, 2.], + [6., 8., 6.43614494, 4.], + [7., 9., 15.17363237, 6.]]) + +linkage_X_ward = array([[3., 4., 0.36265956, 2.], + [1., 5., 1.77045373, 2.], + [0., 2., 2.55760419, 2.], + [6., 8., 9.10208346, 4.], + [7., 9., 24.7784379, 6.]]) + +# the optimal leaf ordering of linkage_X_ward +linkage_X_ward_olo = array([[4., 3., 0.36265956, 2.], + [5., 1., 1.77045373, 2.], + [2., 0., 2.55760419, 2.], + [6., 8., 9.10208346, 4.], + [7., 9., 24.7784379, 6.]]) + +inconsistent_ytdist = { + 1: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [255., 0., 1., 0.], + [268., 0., 1., 0.], + [295., 0., 1., 0.]]), + 2: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [237., 25.45584412, 2., 0.70710678], + [261.5, 9.19238816, 2., 0.70710678], + [233.66666667, 83.9424406, 3., 0.7306594]]), + 3: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [237., 25.45584412, 2., 0.70710678], + [247.33333333, 25.38372182, 3., 0.81417007], + [239., 69.36377537, 4., 0.80733783]]), + 4: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [237., 25.45584412, 2., 0.70710678], + [247.33333333, 25.38372182, 3., 0.81417007], + [235., 60.73302232, 5., 0.98793042]])} + +fcluster_inconsistent = { + 0.8: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 1.0: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1])} + +fcluster_distance = { + 0.6: array([4, 4, 4, 4, 4, 4, 4, 5, 4, 4, 6, 6, 6, 6, 6, 7, 6, 6, 6, 6, 3, + 1, 1, 1, 2, 1, 1, 1, 1, 1]), + 1.0: array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1])} + +fcluster_maxclust = { + 8.0: array([5, 5, 5, 5, 5, 5, 5, 6, 5, 5, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 4, + 1, 1, 1, 3, 1, 1, 1, 1, 2]), + 4.0: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 1.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1])} diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py new file mode 100644 index 0000000000000000000000000000000000000000..a73512d35eef168f625a1942a87d248e73a71aa2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py @@ -0,0 +1,202 @@ +import pytest +from pytest import raises as assert_raises +import numpy as np +from scipy.cluster.hierarchy import DisjointSet +import string + + +def generate_random_token(): + k = len(string.ascii_letters) + tokens = list(np.arange(k, dtype=int)) + tokens += list(np.arange(k, dtype=float)) + tokens += list(string.ascii_letters) + tokens += [None for i in range(k)] + tokens = np.array(tokens, dtype=object) + rng = np.random.RandomState(seed=0) + + while 1: + size = rng.randint(1, 3) + element = rng.choice(tokens, size) + if size == 1: + yield element[0] + 
else: + yield tuple(element) + + +def get_elements(n): + # dict is deterministic without difficulty of comparing numpy ints + elements = {} + for element in generate_random_token(): + if element not in elements: + elements[element] = len(elements) + if len(elements) >= n: + break + return list(elements.keys()) + + +def test_init(): + n = 10 + elements = get_elements(n) + dis = DisjointSet(elements) + assert dis.n_subsets == n + assert list(dis) == elements + + +def test_len(): + n = 10 + elements = get_elements(n) + dis = DisjointSet(elements) + assert len(dis) == n + + dis.add("dummy") + assert len(dis) == n + 1 + + +@pytest.mark.parametrize("n", [10, 100]) +def test_contains(n): + elements = get_elements(n) + dis = DisjointSet(elements) + for x in elements: + assert x in dis + + assert "dummy" not in dis + + +@pytest.mark.parametrize("n", [10, 100]) +def test_add(n): + elements = get_elements(n) + dis1 = DisjointSet(elements) + + dis2 = DisjointSet() + for i, x in enumerate(elements): + dis2.add(x) + assert len(dis2) == i + 1 + + # test idempotency by adding element again + dis2.add(x) + assert len(dis2) == i + 1 + + assert list(dis1) == list(dis2) + + +def test_element_not_present(): + elements = get_elements(n=10) + dis = DisjointSet(elements) + + with assert_raises(KeyError): + dis["dummy"] + + with assert_raises(KeyError): + dis.merge(elements[0], "dummy") + + with assert_raises(KeyError): + dis.connected(elements[0], "dummy") + + +@pytest.mark.parametrize("direction", ["forwards", "backwards"]) +@pytest.mark.parametrize("n", [10, 100]) +def test_linear_union_sequence(n, direction): + elements = get_elements(n) + dis = DisjointSet(elements) + assert elements == list(dis) + + indices = list(range(n - 1)) + if direction == "backwards": + indices = indices[::-1] + + for it, i in enumerate(indices): + assert not dis.connected(elements[i], elements[i + 1]) + assert dis.merge(elements[i], elements[i + 1]) + assert dis.connected(elements[i], elements[i + 1]) + assert dis.n_subsets == n - 1 - it + + roots = [dis[i] for i in elements] + if direction == "forwards": + assert all(elements[0] == r for r in roots) + else: + assert all(elements[-2] == r for r in roots) + assert not dis.merge(elements[0], elements[-1]) + + +@pytest.mark.parametrize("n", [10, 100]) +def test_self_unions(n): + elements = get_elements(n) + dis = DisjointSet(elements) + + for x in elements: + assert dis.connected(x, x) + assert not dis.merge(x, x) + assert dis.connected(x, x) + assert dis.n_subsets == len(elements) + + assert elements == list(dis) + roots = [dis[x] for x in elements] + assert elements == roots + + +@pytest.mark.parametrize("order", ["ab", "ba"]) +@pytest.mark.parametrize("n", [10, 100]) +def test_equal_size_ordering(n, order): + elements = get_elements(n) + dis = DisjointSet(elements) + + rng = np.random.RandomState(seed=0) + indices = np.arange(n) + rng.shuffle(indices) + + for i in range(0, len(indices), 2): + a, b = elements[indices[i]], elements[indices[i + 1]] + if order == "ab": + assert dis.merge(a, b) + else: + assert dis.merge(b, a) + + expected = elements[min(indices[i], indices[i + 1])] + assert dis[a] == expected + assert dis[b] == expected + + +@pytest.mark.parametrize("kmax", [5, 10]) +def test_binary_tree(kmax): + n = 2**kmax + elements = get_elements(n) + dis = DisjointSet(elements) + rng = np.random.RandomState(seed=0) + + for k in 2**np.arange(kmax): + for i in range(0, n, 2 * k): + r1, r2 = rng.randint(0, k, size=2) + a, b = elements[i + r1], elements[i + k + r2] + assert not 
dis.connected(a, b) + assert dis.merge(a, b) + assert dis.connected(a, b) + + assert elements == list(dis) + roots = [dis[i] for i in elements] + expected_indices = np.arange(n) - np.arange(n) % (2 * k) + expected = [elements[i] for i in expected_indices] + assert roots == expected + + +@pytest.mark.parametrize("n", [10, 100]) +def test_subsets(n): + elements = get_elements(n) + dis = DisjointSet(elements) + + rng = np.random.RandomState(seed=0) + for i, j in rng.randint(0, n, (n, 2)): + x = elements[i] + y = elements[j] + + expected = {element for element in dis if {dis[element]} == {dis[x]}} + assert dis.subset_size(x) == len(dis.subset(x)) + assert expected == dis.subset(x) + + expected = {dis[element]: set() for element in dis} + for element in dis: + expected[dis[element]].add(element) + expected = list(expected.values()) + assert expected == dis.subsets() + + dis.merge(x, y) + assert dis.subset(x) == dis.subset(y) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py new file mode 100644 index 0000000000000000000000000000000000000000..c474bee6f4b943a1dd9047374f5791a728ccd8be --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py @@ -0,0 +1,1225 @@ +# +# Author: Damian Eads +# Date: April 17, 2008 +# +# Copyright (C) 2008 Damian Eads +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+import numpy as np +from numpy.testing import assert_allclose, assert_equal, assert_, assert_warns +import pytest +from pytest import raises as assert_raises + +import scipy.cluster.hierarchy +from scipy.cluster.hierarchy import ( + ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage, + num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster, + is_isomorphic, single, leaders, + correspond, is_monotonic, maxdists, maxinconsts, maxRstat, + is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram, + set_link_color_palette, cut_tree, optimal_leaf_ordering, + _order_cluster_tree, _hierarchy, _LINKAGE_METHODS) +from scipy.spatial.distance import pdist +from scipy.cluster._hierarchy import Heap +from scipy.conftest import array_api_compatible +from scipy._lib._array_api import xp_assert_close, xp_assert_equal + +from . import hierarchy_test_data + + +# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so +# check if it's available +try: + import matplotlib + # and set the backend to be Agg (no gui) + matplotlib.use('Agg') + # before importing pyplot + import matplotlib.pyplot as plt + have_matplotlib = True +except Exception: + have_matplotlib = False + + +pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_if_array_api")] +skip_if_array_api = pytest.mark.skip_if_array_api + + +class TestLinkage: + + @skip_if_array_api(cpu_only=True) + def test_linkage_non_finite_elements_in_distance_matrix(self, xp): + # Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf). + # Exception expected. + y = xp.zeros((6,)) + y[0] = xp.nan + assert_raises(ValueError, linkage, y) + + @skip_if_array_api(cpu_only=True) + def test_linkage_empty_distance_matrix(self, xp): + # Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected. + y = xp.zeros((0,)) + assert_raises(ValueError, linkage, y) + + @skip_if_array_api(cpu_only=True) + def test_linkage_tdist(self, xp): + for method in ['single', 'complete', 'average', 'weighted']: + self.check_linkage_tdist(method, xp) + + def check_linkage_tdist(self, method, xp): + # Tests linkage(Y, method) on the tdist data set. + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), method) + expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method) + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10) + + @skip_if_array_api(cpu_only=True) + def test_linkage_X(self, xp): + for method in ['centroid', 'median', 'ward']: + self.check_linkage_q(method, xp) + + def check_linkage_q(self, method, xp): + # Tests linkage(Y, method) on the Q data set. 
+ Z = linkage(xp.asarray(hierarchy_test_data.X), method) + expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method) + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06) + + y = scipy.spatial.distance.pdist(hierarchy_test_data.X, + metric="euclidean") + Z = linkage(xp.asarray(y), method) + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06) + + @skip_if_array_api(cpu_only=True) + def test_compare_with_trivial(self, xp): + rng = np.random.RandomState(0) + n = 20 + X = rng.rand(n, 2) + d = pdist(X) + + for method, code in _LINKAGE_METHODS.items(): + Z_trivial = _hierarchy.linkage(d, n, code) + Z = linkage(xp.asarray(d), method) + xp_assert_close(Z, xp.asarray(Z_trivial), rtol=1e-14, atol=1e-15) + + @skip_if_array_api(cpu_only=True) + def test_optimal_leaf_ordering(self, xp): + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), optimal_ordering=True) + expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo') + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10) + + +@skip_if_array_api(cpu_only=True) +class TestLinkageTies: + + _expectations = { + 'single': np.array([[0, 1, 1.41421356, 2], + [2, 3, 1.41421356, 3]]), + 'complete': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.82842712, 3]]), + 'average': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'weighted': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'centroid': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'median': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'ward': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.44948974, 3]]), + } + + def test_linkage_ties(self, xp): + for method in ['single', 'complete', 'average', 'weighted', + 'centroid', 'median', 'ward']: + self.check_linkage_ties(method, xp) + + def check_linkage_ties(self, method, xp): + X = xp.asarray([[-1, -1], [0, 0], [1, 1]]) + Z = linkage(X, method=method) + expectedZ = self._expectations[method] + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06) + + +@skip_if_array_api(cpu_only=True) +class TestInconsistent: + + def test_inconsistent_tdist(self, xp): + for depth in hierarchy_test_data.inconsistent_ytdist: + self.check_inconsistent_tdist(depth, xp) + + def check_inconsistent_tdist(self, depth, xp): + Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single) + xp_assert_close(inconsistent(Z, depth), + xp.asarray(hierarchy_test_data.inconsistent_ytdist[depth])) + + +@skip_if_array_api(cpu_only=True) +class TestCopheneticDistance: + + def test_linkage_cophenet_tdist_Z(self, xp): + # Tests cophenet(Z) on tdist data set. + expectedM = xp.asarray([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, + 295, 138, 219, 295, 295]) + Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single) + M = cophenet(Z) + xp_assert_close(M, xp.asarray(expectedM, dtype=xp.float64), atol=1e-10) + + def test_linkage_cophenet_tdist_Z_Y(self, xp): + # Tests cophenet(Z, Y) on tdist data set. + Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single) + (c, M) = cophenet(Z, xp.asarray(hierarchy_test_data.ytdist)) + expectedM = xp.asarray([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, + 295, 138, 219, 295, 295], dtype=xp.float64) + expectedc = xp.asarray(0.639931296433393415057366837573, dtype=xp.float64)[()] + xp_assert_close(c, expectedc, atol=1e-10) + xp_assert_close(M, expectedM, atol=1e-10) + + +class TestMLabLinkageConversion: + + def test_mlab_linkage_conversion_empty(self, xp): + # Tests from/to_mlab_linkage on empty linkage array. 
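+        # (A MATLAB-format linkage matrix is 1-indexed and has three
+        # columns; the SciPy format is 0-indexed and carries a fourth
+        # column with observation counts, as the single-row and
+        # multiple-row tests below illustrate.)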
+ X = xp.asarray([], dtype=xp.float64) + xp_assert_equal(from_mlab_linkage(X), X) + xp_assert_equal(to_mlab_linkage(X), X) + + @skip_if_array_api(cpu_only=True) + def test_mlab_linkage_conversion_single_row(self, xp): + # Tests from/to_mlab_linkage on linkage array with single row. + Z = xp.asarray([[0., 1., 3., 2.]]) + Zm = xp.asarray([[1, 2, 3]]) + xp_assert_close(from_mlab_linkage(Zm), xp.asarray(Z, dtype=xp.float64), + rtol=1e-15) + xp_assert_close(to_mlab_linkage(Z), xp.asarray(Zm, dtype=xp.float64), + rtol=1e-15) + + @skip_if_array_api(cpu_only=True) + def test_mlab_linkage_conversion_multiple_rows(self, xp): + # Tests from/to_mlab_linkage on linkage array with multiple rows. + Zm = xp.asarray([[3, 6, 138], [4, 5, 219], + [1, 8, 255], [2, 9, 268], [7, 10, 295]]) + Z = xp.asarray([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 255., 3.], + [1., 8., 268., 4.], + [6., 9., 295., 6.]], + dtype=xp.float64) + xp_assert_close(from_mlab_linkage(Zm), Z, rtol=1e-15) + xp_assert_close(to_mlab_linkage(Z), xp.asarray(Zm, dtype=xp.float64), + rtol=1e-15) + + +@skip_if_array_api(cpu_only=True) +class TestFcluster: + + def test_fclusterdata(self, xp): + for t in hierarchy_test_data.fcluster_inconsistent: + self.check_fclusterdata(t, 'inconsistent', xp) + for t in hierarchy_test_data.fcluster_distance: + self.check_fclusterdata(t, 'distance', xp) + for t in hierarchy_test_data.fcluster_maxclust: + self.check_fclusterdata(t, 'maxclust', xp) + + def check_fclusterdata(self, t, criterion, xp): + # Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set + expectedT = xp.asarray(getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]) + X = xp.asarray(hierarchy_test_data.Q_X) + T = fclusterdata(X, criterion=criterion, t=t) + assert_(is_isomorphic(T, expectedT)) + + def test_fcluster(self, xp): + for t in hierarchy_test_data.fcluster_inconsistent: + self.check_fcluster(t, 'inconsistent', xp) + for t in hierarchy_test_data.fcluster_distance: + self.check_fcluster(t, 'distance', xp) + for t in hierarchy_test_data.fcluster_maxclust: + self.check_fcluster(t, 'maxclust', xp) + + def check_fcluster(self, t, criterion, xp): + # Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set. + expectedT = xp.asarray(getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]) + Z = single(xp.asarray(hierarchy_test_data.Q_X)) + T = fcluster(Z, criterion=criterion, t=t) + assert_(is_isomorphic(T, expectedT)) + + def test_fcluster_monocrit(self, xp): + for t in hierarchy_test_data.fcluster_distance: + self.check_fcluster_monocrit(t, xp) + for t in hierarchy_test_data.fcluster_maxclust: + self.check_fcluster_maxclust_monocrit(t, xp) + + def check_fcluster_monocrit(self, t, xp): + expectedT = xp.asarray(hierarchy_test_data.fcluster_distance[t]) + Z = single(xp.asarray(hierarchy_test_data.Q_X)) + T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z)) + assert_(is_isomorphic(T, expectedT)) + + def check_fcluster_maxclust_monocrit(self, t, xp): + expectedT = xp.asarray(hierarchy_test_data.fcluster_maxclust[t]) + Z = single(xp.asarray(hierarchy_test_data.Q_X)) + T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z)) + assert_(is_isomorphic(T, expectedT)) + + +@skip_if_array_api(cpu_only=True) +class TestLeaders: + + def test_leaders_single(self, xp): + # Tests leaders using a flat clustering generated by single linkage. 
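+        # (leaders returns a pair of arrays: the leader node ids L and the
+        # matching flat cluster ids M; Lright below therefore pairs node
+        # ids [53, 55, 56] with cluster ids [2, 3, 1].)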
+ X = hierarchy_test_data.Q_X + Y = pdist(X) + Y = xp.asarray(Y) + Z = linkage(Y) + T = fcluster(Z, criterion='maxclust', t=3) + Lright = (xp.asarray([53, 55, 56]), xp.asarray([2, 3, 1])) + T = xp.asarray(T, dtype=xp.int32) + L = leaders(Z, T) + assert_allclose(np.concatenate(L), np.concatenate(Lright), rtol=1e-15) + + +@skip_if_array_api(np_only=True, + reasons=['`is_isomorphic` only supports NumPy backend']) +class TestIsIsomorphic: + + @skip_if_array_api(np_only=True, + reasons=['array-likes only supported for NumPy backend']) + def test_array_like(self, xp): + assert is_isomorphic([1, 1, 1], [2, 2, 2]) + assert is_isomorphic([], []) + + def test_is_isomorphic_1(self, xp): + # Tests is_isomorphic on test case #1 (one flat cluster, different labellings) + a = xp.asarray([1, 1, 1]) + b = xp.asarray([2, 2, 2]) + assert is_isomorphic(a, b) + assert is_isomorphic(b, a) + + def test_is_isomorphic_2(self, xp): + # Tests is_isomorphic on test case #2 (two flat clusters, different labelings) + a = xp.asarray([1, 7, 1]) + b = xp.asarray([2, 3, 2]) + assert is_isomorphic(a, b) + assert is_isomorphic(b, a) + + def test_is_isomorphic_3(self, xp): + # Tests is_isomorphic on test case #3 (no flat clusters) + a = xp.asarray([]) + b = xp.asarray([]) + assert is_isomorphic(a, b) + + def test_is_isomorphic_4A(self, xp): + # Tests is_isomorphic on test case #4A + # (3 flat clusters, different labelings, isomorphic) + a = xp.asarray([1, 2, 3]) + b = xp.asarray([1, 3, 2]) + assert is_isomorphic(a, b) + assert is_isomorphic(b, a) + + def test_is_isomorphic_4B(self, xp): + # Tests is_isomorphic on test case #4B + # (3 flat clusters, different labelings, nonisomorphic) + a = xp.asarray([1, 2, 3, 3]) + b = xp.asarray([1, 3, 2, 3]) + assert is_isomorphic(a, b) is False + assert is_isomorphic(b, a) is False + + def test_is_isomorphic_4C(self, xp): + # Tests is_isomorphic on test case #4C + # (3 flat clusters, different labelings, isomorphic) + a = xp.asarray([7, 2, 3]) + b = xp.asarray([6, 3, 2]) + assert is_isomorphic(a, b) + assert is_isomorphic(b, a) + + def test_is_isomorphic_5(self, xp): + # Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random + # clusters, random permutation of the labeling). + for nc in [2, 3, 5]: + self.help_is_isomorphic_randperm(1000, nc, xp=xp) + + def test_is_isomorphic_6(self, xp): + # Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random + # clusters, random permutation of the labeling, slightly + # nonisomorphic.) 
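+        # ("Slightly nonisomorphic" here means that
+        # help_is_isomorphic_randperm perturbs nerrors=5 of the permuted
+        # labels, so the label mapping is no longer a bijection and
+        # is_isomorphic should return False.)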
+ for nc in [2, 3, 5]: + self.help_is_isomorphic_randperm(1000, nc, True, 5, xp=xp) + + def test_is_isomorphic_7(self, xp): + # Regression test for gh-6271 + a = xp.asarray([1, 2, 3]) + b = xp.asarray([1, 1, 1]) + assert not is_isomorphic(a, b) + + def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0, + *, xp): + for k in range(3): + a = (np.random.rand(nobs) * nclusters).astype(int) + b = np.zeros(a.size, dtype=int) + P = np.random.permutation(nclusters) + for i in range(0, a.shape[0]): + b[i] = P[a[i]] + if noniso: + Q = np.random.permutation(nobs) + b[Q[0:nerrors]] += 1 + b[Q[0:nerrors]] %= nclusters + a = xp.asarray(a) + b = xp.asarray(b) + assert is_isomorphic(a, b) == (not noniso) + assert is_isomorphic(b, a) == (not noniso) + + +@skip_if_array_api(cpu_only=True) +class TestIsValidLinkage: + + def test_is_valid_linkage_various_size(self, xp): + for nrow, ncol, valid in [(2, 5, False), (2, 3, False), + (1, 4, True), (2, 4, True)]: + self.check_is_valid_linkage_various_size(nrow, ncol, valid, xp) + + def check_is_valid_linkage_various_size(self, nrow, ncol, valid, xp): + # Tests is_valid_linkage(Z) with linkage matrices of various sizes + Z = xp.asarray([[0, 1, 3.0, 2, 5], + [3, 2, 4.0, 3, 3]], dtype=xp.float64) + Z = Z[:nrow, :ncol] + assert_(is_valid_linkage(Z) == valid) + if not valid: + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_int_type(self, xp): + # Tests is_valid_linkage(Z) with integer type. + Z = xp.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=xp.int64) + assert_(is_valid_linkage(Z) is False) + assert_raises(TypeError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_empty(self, xp): + # Tests is_valid_linkage(Z) with empty linkage. + Z = xp.zeros((0, 4), dtype=xp.float64) + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + assert_(is_valid_linkage(Z) is True) + + def test_is_valid_linkage_4_and_up_neg_index_left(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative indices (left). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + Z[i//2,0] = -2 + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up_neg_index_right(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative indices (right). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + Z[i//2,1] = -2 + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up_neg_dist(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative distances. 
+ for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + Z[i//2,2] = -0.5 + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up_neg_counts(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative counts. + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + Z[i//2,3] = -2 + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + +@skip_if_array_api(cpu_only=True) +class TestIsValidInconsistent: + + def test_is_valid_im_int_type(self, xp): + # Tests is_valid_im(R) with integer type. + R = xp.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=xp.int64) + assert_(is_valid_im(R) is False) + assert_raises(TypeError, is_valid_im, R, throw=True) + + def test_is_valid_im_various_size(self, xp): + for nrow, ncol, valid in [(2, 5, False), (2, 3, False), + (1, 4, True), (2, 4, True)]: + self.check_is_valid_im_various_size(nrow, ncol, valid, xp) + + def check_is_valid_im_various_size(self, nrow, ncol, valid, xp): + # Tests is_valid_im(R) with linkage matrices of various sizes + R = xp.asarray([[0, 1, 3.0, 2, 5], + [3, 2, 4.0, 3, 3]], dtype=xp.float64) + R = R[:nrow, :ncol] + assert_(is_valid_im(R) == valid) + if not valid: + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_empty(self, xp): + # Tests is_valid_im(R) with empty inconsistency matrix. + R = xp.zeros((0, 4), dtype=xp.float64) + assert_(is_valid_im(R) is False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_4_and_up(self, xp): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + R = inconsistent(Z) + assert_(is_valid_im(R) is True) + + def test_is_valid_im_4_and_up_neg_index_left(self, xp): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3) with negative link height means. + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + R = inconsistent(Z) + R[i//2,0] = -2.0 + assert_(is_valid_im(R) is False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_4_and_up_neg_index_right(self, xp): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3) with negative link height standard deviations. + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + R = inconsistent(Z) + R[i//2,1] = -2.0 + assert_(is_valid_im(R) is False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_4_and_up_neg_dist(self, xp): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3) with negative link counts. + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + R = inconsistent(Z) + R[i//2,2] = -0.5 + assert_(is_valid_im(R) is False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + +class TestNumObsLinkage: + + @skip_if_array_api(cpu_only=True) + def test_num_obs_linkage_empty(self, xp): + # Tests num_obs_linkage(Z) with empty linkage. 
+ Z = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, num_obs_linkage, Z) + + def test_num_obs_linkage_1x4(self, xp): + # Tests num_obs_linkage(Z) on linkage over 2 observations. + Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64) + assert_equal(num_obs_linkage(Z), 2) + + def test_num_obs_linkage_2x4(self, xp): + # Tests num_obs_linkage(Z) on linkage over 3 observations. + Z = xp.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=xp.float64) + assert_equal(num_obs_linkage(Z), 3) + + @skip_if_array_api(cpu_only=True) + def test_num_obs_linkage_4_and_up(self, xp): + # Tests num_obs_linkage(Z) on linkage on observation sets between sizes + # 4 and 15 (step size 3). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + assert_equal(num_obs_linkage(Z), i) + + +@skip_if_array_api(cpu_only=True) +class TestLeavesList: + + def test_leaves_list_1x4(self, xp): + # Tests leaves_list(Z) on a 1x4 linkage. + Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64) + to_tree(Z) + assert_allclose(leaves_list(Z), [0, 1], rtol=1e-15) + + def test_leaves_list_2x4(self, xp): + # Tests leaves_list(Z) on a 2x4 linkage. + Z = xp.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=xp.float64) + to_tree(Z) + assert_allclose(leaves_list(Z), [0, 1, 2], rtol=1e-15) + + def test_leaves_list_Q(self, xp): + for method in ['single', 'complete', 'average', 'weighted', 'centroid', + 'median', 'ward']: + self.check_leaves_list_Q(method, xp) + + def check_leaves_list_Q(self, method, xp): + # Tests leaves_list(Z) on the Q data set + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, method) + node = to_tree(Z) + assert_allclose(node.pre_order(), leaves_list(Z), rtol=1e-15) + + def test_Q_subtree_pre_order(self, xp): + # Tests that pre_order() works when called on sub-trees. + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, 'single') + node = to_tree(Z) + assert_allclose(node.pre_order(), (node.get_left().pre_order() + + node.get_right().pre_order()), + rtol=1e-15) + + +@skip_if_array_api(cpu_only=True) +class TestCorrespond: + + def test_correspond_empty(self, xp): + # Tests correspond(Z, y) with empty linkage and condensed distance matrix. + y = xp.zeros((0,), dtype=xp.float64) + Z = xp.zeros((0,4), dtype=xp.float64) + assert_raises(ValueError, correspond, Z, y) + + def test_correspond_2_and_up(self, xp): + # Tests correspond(Z, y) on linkage and CDMs over observation sets of + # different sizes. + for i in range(2, 4): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + assert_(correspond(Z, y)) + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + assert_(correspond(Z, y)) + + def test_correspond_4_and_up(self, xp): + # Tests correspond(Z, y) on linkage and CDMs over observation sets of + # different sizes. Correspondence should be false. + for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) + + list(zip(list(range(3, 5)), list(range(2, 4))))): + y = np.random.rand(i*(i-1)//2) + y2 = np.random.rand(j*(j-1)//2) + y = xp.asarray(y) + y2 = xp.asarray(y2) + Z = linkage(y) + Z2 = linkage(y2) + assert not correspond(Z, y2) + assert not correspond(Z2, y) + + def test_correspond_4_and_up_2(self, xp): + # Tests correspond(Z, y) on linkage and CDMs over observation sets of + # different sizes. Correspondence should be false. 
+ for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) + + list(zip(list(range(2, 7)), list(range(16, 21))))): + y = np.random.rand(i*(i-1)//2) + y2 = np.random.rand(j*(j-1)//2) + y = xp.asarray(y) + y2 = xp.asarray(y2) + Z = linkage(y) + Z2 = linkage(y2) + assert not correspond(Z, y2) + assert not correspond(Z2, y) + + def test_num_obs_linkage_multi_matrix(self, xp): + # Tests num_obs_linkage with observation matrices of multiple sizes. + for n in range(2, 10): + X = np.random.rand(n, 4) + Y = pdist(X) + Y = xp.asarray(Y) + Z = linkage(Y) + assert_equal(num_obs_linkage(Z), n) + + +@skip_if_array_api(cpu_only=True) +class TestIsMonotonic: + + def test_is_monotonic_empty(self, xp): + # Tests is_monotonic(Z) on an empty linkage. + Z = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, is_monotonic, Z) + + def test_is_monotonic_1x4(self, xp): + # Tests is_monotonic(Z) on 1x4 linkage. Expecting True. + Z = xp.asarray([[0, 1, 0.3, 2]], dtype=xp.float64) + assert is_monotonic(Z) + + def test_is_monotonic_2x4_T(self, xp): + # Tests is_monotonic(Z) on 2x4 linkage. Expecting True. + Z = xp.asarray([[0, 1, 0.3, 2], + [2, 3, 0.4, 3]], dtype=xp.float64) + assert is_monotonic(Z) + + def test_is_monotonic_2x4_F(self, xp): + # Tests is_monotonic(Z) on 2x4 linkage. Expecting False. + Z = xp.asarray([[0, 1, 0.4, 2], + [2, 3, 0.3, 3]], dtype=xp.float64) + assert not is_monotonic(Z) + + def test_is_monotonic_3x4_T(self, xp): + # Tests is_monotonic(Z) on 3x4 linkage. Expecting True. + Z = xp.asarray([[0, 1, 0.3, 2], + [2, 3, 0.4, 2], + [4, 5, 0.6, 4]], dtype=xp.float64) + assert is_monotonic(Z) + + def test_is_monotonic_3x4_F1(self, xp): + # Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False. + Z = xp.asarray([[0, 1, 0.3, 2], + [2, 3, 0.2, 2], + [4, 5, 0.6, 4]], dtype=xp.float64) + assert not is_monotonic(Z) + + def test_is_monotonic_3x4_F2(self, xp): + # Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False. + Z = xp.asarray([[0, 1, 0.8, 2], + [2, 3, 0.4, 2], + [4, 5, 0.6, 4]], dtype=xp.float64) + assert not is_monotonic(Z) + + def test_is_monotonic_3x4_F3(self, xp): + # Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False + Z = xp.asarray([[0, 1, 0.3, 2], + [2, 3, 0.4, 2], + [4, 5, 0.2, 4]], dtype=xp.float64) + assert not is_monotonic(Z) + + def test_is_monotonic_tdist_linkage1(self, xp): + # Tests is_monotonic(Z) on clustering generated by single linkage on + # tdist data set. Expecting True. + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + assert is_monotonic(Z) + + def test_is_monotonic_tdist_linkage2(self, xp): + # Tests is_monotonic(Z) on clustering generated by single linkage on + # tdist data set. Perturbing. Expecting False. + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + Z[2,2] = 0.0 + assert not is_monotonic(Z) + + def test_is_monotonic_Q_linkage(self, xp): + # Tests is_monotonic(Z) on clustering generated by single linkage on + # Q data set. Expecting True. + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, 'single') + assert is_monotonic(Z) + + +@skip_if_array_api(cpu_only=True) +class TestMaxDists: + + def test_maxdists_empty_linkage(self, xp): + # Tests maxdists(Z) on empty linkage. Expecting exception. + Z = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, maxdists, Z) + + def test_maxdists_one_cluster_linkage(self, xp): + # Tests maxdists(Z) on linkage with one cluster. 
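+        # (The expected values come from calculate_maximum_distances,
+        # assumed to be a brute-force reference helper defined elsewhere
+        # in this test module.)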
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + MD = maxdists(Z) + expectedMD = calculate_maximum_distances(Z, xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + def test_maxdists_Q_linkage(self, xp): + for method in ['single', 'complete', 'ward', 'centroid', 'median']: + self.check_maxdists_Q_linkage(method, xp) + + def check_maxdists_Q_linkage(self, method, xp): + # Tests maxdists(Z) on the Q data set + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, method) + MD = maxdists(Z) + expectedMD = calculate_maximum_distances(Z, xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + +class TestMaxInconsts: + + @skip_if_array_api(cpu_only=True) + def test_maxinconsts_empty_linkage(self, xp): + # Tests maxinconsts(Z, R) on empty linkage. Expecting exception. + Z = xp.zeros((0, 4), dtype=xp.float64) + R = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, maxinconsts, Z, R) + + def test_maxinconsts_difrow_linkage(self, xp): + # Tests maxinconsts(Z, R) on linkage and inconsistency matrices with + # different numbers of clusters. Expecting exception. + Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + R = np.random.rand(2, 4) + R = xp.asarray(R) + assert_raises(ValueError, maxinconsts, Z, R) + + @skip_if_array_api(cpu_only=True) + def test_maxinconsts_one_cluster_linkage(self, xp): + # Tests maxinconsts(Z, R) on linkage with one cluster. + Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64) + MD = maxinconsts(Z, R) + expectedMD = calculate_maximum_inconsistencies(Z, R, xp=xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + @skip_if_array_api(cpu_only=True) + def test_maxinconsts_Q_linkage(self, xp): + for method in ['single', 'complete', 'ward', 'centroid', 'median']: + self.check_maxinconsts_Q_linkage(method, xp) + + def check_maxinconsts_Q_linkage(self, method, xp): + # Tests maxinconsts(Z, R) on the Q data set + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, method) + R = inconsistent(Z) + MD = maxinconsts(Z, R) + expectedMD = calculate_maximum_inconsistencies(Z, R, xp=xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + +class TestMaxRStat: + + def test_maxRstat_invalid_index(self, xp): + for i in [3.3, -1, 4]: + self.check_maxRstat_invalid_index(i, xp) + + def check_maxRstat_invalid_index(self, i, xp): + # Tests maxRstat(Z, R, i). Expecting exception. + Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64) + if isinstance(i, int): + assert_raises(ValueError, maxRstat, Z, R, i) + else: + assert_raises(TypeError, maxRstat, Z, R, i) + + @skip_if_array_api(cpu_only=True) + def test_maxRstat_empty_linkage(self, xp): + for i in range(4): + self.check_maxRstat_empty_linkage(i, xp) + + def check_maxRstat_empty_linkage(self, i, xp): + # Tests maxRstat(Z, R, i) on empty linkage. Expecting exception. + Z = xp.zeros((0, 4), dtype=xp.float64) + R = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, maxRstat, Z, R, i) + + def test_maxRstat_difrow_linkage(self, xp): + for i in range(4): + self.check_maxRstat_difrow_linkage(i, xp) + + def check_maxRstat_difrow_linkage(self, i, xp): + # Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with + # different numbers of clusters. Expecting exception. 
+        Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
+        R = np.random.rand(2, 4)
+        R = xp.asarray(R)
+        assert_raises(ValueError, maxRstat, Z, R, i)
+
+    @skip_if_array_api(cpu_only=True)
+    def test_maxRstat_one_cluster_linkage(self, xp):
+        for i in range(4):
+            self.check_maxRstat_one_cluster_linkage(i, xp)
+
+    def check_maxRstat_one_cluster_linkage(self, i, xp):
+        # Tests maxRstat(Z, R, i) on linkage with one cluster.
+        Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
+        R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64)
+        MD = maxRstat(Z, R, i)
+        expectedMD = calculate_maximum_inconsistencies(Z, R, i, xp)
+        xp_assert_close(MD, expectedMD, atol=1e-15)
+
+    @skip_if_array_api(cpu_only=True)
+    def test_maxRstat_Q_linkage(self, xp):
+        for method in ['single', 'complete', 'ward', 'centroid', 'median']:
+            for i in range(4):
+                self.check_maxRstat_Q_linkage(method, i, xp)
+
+    def check_maxRstat_Q_linkage(self, method, i, xp):
+        # Tests maxRstat(Z, R, i) on the Q data set.
+        X = xp.asarray(hierarchy_test_data.Q_X)
+        Z = linkage(X, method)
+        R = inconsistent(Z)
+        MD = maxRstat(Z, R, i)
+        expectedMD = calculate_maximum_inconsistencies(Z, R, i, xp)
+        xp_assert_close(MD, expectedMD, atol=1e-15)
+
+
+@skip_if_array_api(cpu_only=True)
+class TestDendrogram:
+
+    def test_dendrogram_single_linkage_tdist(self, xp):
+        # Tests dendrogram calculation on single linkage of the tdist data set.
+        Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
+        R = dendrogram(Z, no_plot=True)
+        leaves = R["leaves"]
+        assert_equal(leaves, [2, 5, 1, 0, 3, 4])
+
+    def test_valid_orientation(self, xp):
+        Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
+        assert_raises(ValueError, dendrogram, Z, orientation="foo")
+
+    def test_labels_as_array_or_list(self, xp):
+        # test for gh-12418
+        Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
+        labels = xp.asarray([1, 3, 2, 6, 4, 5])
+        result1 = dendrogram(Z, labels=labels, no_plot=True)
+        result2 = dendrogram(Z, labels=list(labels), no_plot=True)
+        assert result1 == result2
+
+    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
+    def test_valid_label_size(self, xp):
+        link = xp.asarray([
+            [0, 1, 1.0, 4],
+            [2, 3, 1.0, 5],
+            [4, 5, 2.0, 6],
+        ])
+        plt.figure()
+        with pytest.raises(ValueError) as exc_info:
+            dendrogram(link, labels=list(range(100)))
+        assert "Dimensions of Z and labels must be consistent."\
+               in str(exc_info.value)
+
+        with pytest.raises(
+                ValueError,
+                match="Dimensions of Z and labels must be consistent."):
+            dendrogram(link, labels=[])
+
+        plt.close()
+
+    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
+    def test_dendrogram_plot(self, xp):
+        for orientation in ['top', 'bottom', 'left', 'right']:
+            self.check_dendrogram_plot(orientation, xp)
+
+    def check_dendrogram_plot(self, orientation, xp):
+        # Tests dendrogram plotting.
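+        # dendrogram() draws the k-th leaf from the left at x = 10*k + 5;
+        # each entry of 'icoord' ('dcoord') in the expected result lists the
+        # four x (y) coordinates of one inverted-U link, and 'ivl' holds the
+        # leaf labels in left-to-right order.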
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + expected = {'color_list': ['C1', 'C0', 'C0', 'C0', 'C0'], + 'dcoord': [[0.0, 138.0, 138.0, 0.0], + [0.0, 219.0, 219.0, 0.0], + [0.0, 255.0, 255.0, 219.0], + [0.0, 268.0, 268.0, 255.0], + [138.0, 295.0, 295.0, 268.0]], + 'icoord': [[5.0, 5.0, 15.0, 15.0], + [45.0, 45.0, 55.0, 55.0], + [35.0, 35.0, 50.0, 50.0], + [25.0, 25.0, 42.5, 42.5], + [10.0, 10.0, 33.75, 33.75]], + 'ivl': ['2', '5', '1', '0', '3', '4'], + 'leaves': [2, 5, 1, 0, 3, 4], + 'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0', 'C0'], + } + + fig = plt.figure() + ax = fig.add_subplot(221) + + # test that dendrogram accepts ax keyword + R1 = dendrogram(Z, ax=ax, orientation=orientation) + R1['dcoord'] = np.asarray(R1['dcoord']) + assert_equal(R1, expected) + + # test that dendrogram accepts and handle the leaf_font_size and + # leaf_rotation keywords + dendrogram(Z, ax=ax, orientation=orientation, + leaf_font_size=20, leaf_rotation=90) + testlabel = ( + ax.get_xticklabels()[0] + if orientation in ['top', 'bottom'] + else ax.get_yticklabels()[0] + ) + assert_equal(testlabel.get_rotation(), 90) + assert_equal(testlabel.get_size(), 20) + dendrogram(Z, ax=ax, orientation=orientation, + leaf_rotation=90) + testlabel = ( + ax.get_xticklabels()[0] + if orientation in ['top', 'bottom'] + else ax.get_yticklabels()[0] + ) + assert_equal(testlabel.get_rotation(), 90) + dendrogram(Z, ax=ax, orientation=orientation, + leaf_font_size=20) + testlabel = ( + ax.get_xticklabels()[0] + if orientation in ['top', 'bottom'] + else ax.get_yticklabels()[0] + ) + assert_equal(testlabel.get_size(), 20) + plt.close() + + # test plotting to gca (will import pylab) + R2 = dendrogram(Z, orientation=orientation) + plt.close() + R2['dcoord'] = np.asarray(R2['dcoord']) + assert_equal(R2, expected) + + @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") + def test_dendrogram_truncate_mode(self, xp): + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + + R = dendrogram(Z, 2, 'lastp', show_contracted=True) + plt.close() + R['dcoord'] = np.asarray(R['dcoord']) + assert_equal(R, {'color_list': ['C0'], + 'dcoord': [[0.0, 295.0, 295.0, 0.0]], + 'icoord': [[5.0, 5.0, 15.0, 15.0]], + 'ivl': ['(2)', '(4)'], + 'leaves': [6, 9], + 'leaves_color_list': ['C0', 'C0'], + }) + + R = dendrogram(Z, 2, 'mtica', show_contracted=True) + plt.close() + R['dcoord'] = np.asarray(R['dcoord']) + assert_equal(R, {'color_list': ['C1', 'C0', 'C0', 'C0'], + 'dcoord': [[0.0, 138.0, 138.0, 0.0], + [0.0, 255.0, 255.0, 0.0], + [0.0, 268.0, 268.0, 255.0], + [138.0, 295.0, 295.0, 268.0]], + 'icoord': [[5.0, 5.0, 15.0, 15.0], + [35.0, 35.0, 45.0, 45.0], + [25.0, 25.0, 40.0, 40.0], + [10.0, 10.0, 32.5, 32.5]], + 'ivl': ['2', '5', '1', '0', '(2)'], + 'leaves': [2, 5, 1, 0, 7], + 'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0'], + }) + + def test_dendrogram_colors(self, xp): + # Tests dendrogram plots with alternate colors + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single') + + set_link_color_palette(['c', 'm', 'y', 'k']) + R = dendrogram(Z, no_plot=True, + above_threshold_color='g', color_threshold=250) + set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k']) + + color_list = R['color_list'] + assert_equal(color_list, ['c', 'm', 'g', 'g', 'g']) + + # reset color palette (global list) + set_link_color_palette(None) + + def test_dendrogram_leaf_colors_zero_dist(self, xp): + # tests that the colors of leafs are correct for tree + # with two identical points + x = xp.asarray([[1, 0, 0], + [0, 0, 
1], + [0, 2, 0], + [0, 0, 1], + [0, 1, 0], + [0, 1, 0]]) + z = linkage(x, "single") + d = dendrogram(z, no_plot=True) + exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2'] + colors = d["leaves_color_list"] + assert_equal(colors, exp_colors) + + def test_dendrogram_leaf_colors(self, xp): + # tests that the colors are correct for a tree + # with two near points ((0, 0, 1.1) and (0, 0, 1)) + x = xp.asarray([[1, 0, 0], + [0, 0, 1.1], + [0, 2, 0], + [0, 0, 1], + [0, 1, 0], + [0, 1, 0]]) + z = linkage(x, "single") + d = dendrogram(z, no_plot=True) + exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2'] + colors = d["leaves_color_list"] + assert_equal(colors, exp_colors) + + +def calculate_maximum_distances(Z, xp): + # Used for testing correctness of maxdists. + n = Z.shape[0] + 1 + B = xp.zeros((n-1,), dtype=Z.dtype) + q = xp.zeros((3,)) + for i in range(0, n - 1): + q[:] = 0.0 + left = Z[i, 0] + right = Z[i, 1] + if left >= n: + q[0] = B[xp.asarray(left, dtype=xp.int64) - n] + if right >= n: + q[1] = B[xp.asarray(right, dtype=xp.int64) - n] + q[2] = Z[i, 2] + B[i] = xp.max(q) + return B + + +def calculate_maximum_inconsistencies(Z, R, k=3, xp=np): + # Used for testing correctness of maxinconsts. + n = Z.shape[0] + 1 + dtype = xp.result_type(Z, R) + B = xp.zeros((n-1,), dtype=dtype) + q = xp.zeros((3,)) + for i in range(0, n - 1): + q[:] = 0.0 + left = Z[i, 0] + right = Z[i, 1] + if left >= n: + q[0] = B[xp.asarray(left, dtype=xp.int64) - n] + if right >= n: + q[1] = B[xp.asarray(right, dtype=xp.int64) - n] + q[2] = R[i, k] + B[i] = xp.max(q) + return B + + +@skip_if_array_api(cpu_only=True) +def test_unsupported_uncondensed_distance_matrix_linkage_warning(xp): + assert_warns(ClusterWarning, linkage, xp.asarray([[0, 1], [1, 0]])) + + +def test_euclidean_linkage_value_error(xp): + for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS: + assert_raises(ValueError, linkage, xp.asarray([[1, 1], [1, 1]]), + method=method, metric='cityblock') + + +@skip_if_array_api(cpu_only=True) +def test_2x2_linkage(xp): + Z1 = linkage(xp.asarray([1]), method='single', metric='euclidean') + Z2 = linkage(xp.asarray([[0, 1], [0, 0]]), method='single', metric='euclidean') + xp_assert_close(Z1, Z2, rtol=1e-15) + + +@skip_if_array_api(cpu_only=True) +def test_node_compare(xp): + np.random.seed(23) + nobs = 50 + X = np.random.randn(nobs, 4) + X = xp.asarray(X) + Z = scipy.cluster.hierarchy.ward(X) + tree = to_tree(Z) + assert_(tree > tree.get_left()) + assert_(tree.get_right() > tree.get_left()) + assert_(tree.get_right() == tree.get_right()) + assert_(tree.get_right() != tree.get_left()) + + +@skip_if_array_api(np_only=True, reasons=['`cut_tree` uses non-standard indexing']) +def test_cut_tree(xp): + np.random.seed(23) + nobs = 50 + X = np.random.randn(nobs, 4) + X = xp.asarray(X) + Z = scipy.cluster.hierarchy.ward(X) + cutree = cut_tree(Z) + + # cutree.dtype varies between int32 and int64 over platforms + xp_assert_close(cutree[:, 0], xp.arange(nobs), rtol=1e-15, check_dtype=False) + xp_assert_close(cutree[:, -1], xp.zeros(nobs), rtol=1e-15, check_dtype=False) + assert_equal(np.asarray(cutree).max(0), np.arange(nobs - 1, -1, -1)) + + xp_assert_close(cutree[:, [-5]], cut_tree(Z, n_clusters=5), rtol=1e-15) + xp_assert_close(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10]), rtol=1e-15) + xp_assert_close(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5]), rtol=1e-15) + + nodes = _order_cluster_tree(Z) + heights = xp.asarray([node.dist for node in nodes]) + + xp_assert_close(cutree[:, np.searchsorted(heights, [5])], + 
cut_tree(Z, height=5), rtol=1e-15) + xp_assert_close(cutree[:, np.searchsorted(heights, [5, 10])], + cut_tree(Z, height=[5, 10]), rtol=1e-15) + xp_assert_close(cutree[:, np.searchsorted(heights, [10, 5])], + cut_tree(Z, height=[10, 5]), rtol=1e-15) + + +@skip_if_array_api(cpu_only=True) +def test_optimal_leaf_ordering(xp): + # test with the distance vector y + Z = optimal_leaf_ordering(linkage(xp.asarray(hierarchy_test_data.ytdist)), + xp.asarray(hierarchy_test_data.ytdist)) + expectedZ = hierarchy_test_data.linkage_ytdist_single_olo + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10) + + # test with the observation matrix X + Z = optimal_leaf_ordering(linkage(xp.asarray(hierarchy_test_data.X), 'ward'), + xp.asarray(hierarchy_test_data.X)) + expectedZ = hierarchy_test_data.linkage_X_ward_olo + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06) + + +@skip_if_array_api(np_only=True, reasons=['`Heap` only supports NumPy backend']) +def test_Heap(xp): + values = xp.asarray([2, -1, 0, -1.5, 3]) + heap = Heap(values) + + pair = heap.get_min() + assert_equal(pair['key'], 3) + assert_equal(pair['value'], -1.5) + + heap.remove_min() + pair = heap.get_min() + assert_equal(pair['key'], 1) + assert_equal(pair['value'], -1) + + heap.change_value(1, 2.5) + pair = heap.get_min() + assert_equal(pair['key'], 2) + assert_equal(pair['value'], 0) + + heap.remove_min() + heap.remove_min() + + heap.change_value(1, 10) + pair = heap.get_min() + assert_equal(pair['key'], 4) + assert_equal(pair['value'], 3) + + heap.remove_min() + pair = heap.get_min() + assert_equal(pair['key'], 1) + assert_equal(pair['value'], 10) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py new file mode 100644 index 0000000000000000000000000000000000000000..10b1416b83b44a3f5f4c8e9af396fab21f6ae1f3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py @@ -0,0 +1,421 @@ +import warnings +import sys +from copy import deepcopy + +import numpy as np +from numpy.testing import ( + assert_array_equal, assert_equal, assert_, suppress_warnings +) +import pytest +from pytest import raises as assert_raises + +from scipy.cluster.vq import (kmeans, kmeans2, py_vq, vq, whiten, + ClusterError, _krandinit) +from scipy.cluster import _vq +from scipy.conftest import array_api_compatible +from scipy.sparse._sputils import matrix + +from scipy._lib._array_api import ( + SCIPY_ARRAY_API, copy, cov, xp_assert_close, xp_assert_equal +) + +pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_if_array_api")] +skip_if_array_api = pytest.mark.skip_if_array_api + +TESTDATA_2D = np.array([ + -2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68, + -2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45, + 2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28, + -4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07, + -0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29, + -2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25, + 2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21, + -2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67, + -2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94, + -2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33, + 2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8, + -1.39, 0.66, 
-2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29, + 2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75, + -1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17, + 0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44, + -0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83, + 0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28, + 3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62, + -1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35, + 3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84, + -2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75, + -0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86, + -2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83, + 0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75, + -2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03, + 3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0, + 3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99, + -1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21, + 1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75, + 4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37, + -3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0, + -1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84, + 2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69, + -2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51, + -0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71, + -2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61, + 2.11]).reshape((200, 2)) + + +# Global data +X = np.array([[3.0, 3], [4, 3], [4, 2], + [9, 2], [5, 1], [6, 2], [9, 4], + [5, 2], [5, 4], [7, 4], [6, 5]]) + +CODET1 = np.array([[3.0000, 3.0000], + [6.2000, 4.0000], + [5.8000, 1.8000]]) + +CODET2 = np.array([[11.0/3, 8.0/3], + [6.7500, 4.2500], + [6.2500, 1.7500]]) + +LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1]) + + +class TestWhiten: + + def test_whiten(self, xp): + desired = xp.asarray([[5.08738849, 2.97091878], + [3.19909255, 0.69660580], + [4.51041982, 0.02640918], + [4.38567074, 0.95120889], + [2.32191480, 1.63195503]]) + + obs = xp.asarray([[0.98744510, 0.82766775], + [0.62093317, 0.19406729], + [0.87545741, 0.00735733], + [0.85124403, 0.26499712], + [0.45067590, 0.45464607]]) + xp_assert_close(whiten(obs), desired, rtol=1e-5) + + def test_whiten_zero_std(self, xp): + desired = xp.asarray([[0., 1.0, 2.86666544], + [0., 1.0, 1.32460034], + [0., 1.0, 3.74382172]]) + + obs = xp.asarray([[0., 1., 0.74109533], + [0., 1., 0.34243798], + [0., 1., 0.96785929]]) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + + xp_assert_close(whiten(obs), desired, rtol=1e-5) + + assert_equal(len(w), 1) + assert_(issubclass(w[-1].category, RuntimeWarning)) + + def test_whiten_not_finite(self, xp): + for bad_value in xp.nan, xp.inf, -xp.inf: + obs = xp.asarray([[0.98744510, bad_value], + [0.62093317, 0.19406729], + [0.87545741, 0.00735733], + [0.85124403, 0.26499712], + [0.45067590, 0.45464607]]) + assert_raises(ValueError, whiten, obs) + + @pytest.mark.skipif(SCIPY_ARRAY_API, + reason='`np.matrix` unsupported in array API mode') + def test_whiten_not_finite_matrix(self, xp): + for bad_value in np.nan, np.inf, -np.inf: + 
obs = matrix([[0.98744510, bad_value], + [0.62093317, 0.19406729], + [0.87545741, 0.00735733], + [0.85124403, 0.26499712], + [0.45067590, 0.45464607]]) + assert_raises(ValueError, whiten, obs) + + +class TestVq: + + @skip_if_array_api(cpu_only=True) + def test_py_vq(self, xp): + initc = np.concatenate([[X[0]], [X[1]], [X[2]]]) + # label1.dtype varies between int32 and int64 over platforms + label1 = py_vq(xp.asarray(X), xp.asarray(initc))[0] + xp_assert_equal(label1, xp.asarray(LABEL1, dtype=xp.int64), + check_dtype=False) + + @pytest.mark.skipif(SCIPY_ARRAY_API, + reason='`np.matrix` unsupported in array API mode') + def test_py_vq_matrix(self, xp): + initc = np.concatenate([[X[0]], [X[1]], [X[2]]]) + # label1.dtype varies between int32 and int64 over platforms + label1 = py_vq(matrix(X), matrix(initc))[0] + assert_array_equal(label1, LABEL1) + + @skip_if_array_api(np_only=True, reasons=['`_vq` only supports NumPy backend']) + def test_vq(self, xp): + initc = np.concatenate([[X[0]], [X[1]], [X[2]]]) + label1, _ = _vq.vq(xp.asarray(X), xp.asarray(initc)) + assert_array_equal(label1, LABEL1) + _, _ = vq(xp.asarray(X), xp.asarray(initc)) + + @pytest.mark.skipif(SCIPY_ARRAY_API, + reason='`np.matrix` unsupported in array API mode') + def test_vq_matrix(self, xp): + initc = np.concatenate([[X[0]], [X[1]], [X[2]]]) + label1, _ = _vq.vq(matrix(X), matrix(initc)) + assert_array_equal(label1, LABEL1) + _, _ = vq(matrix(X), matrix(initc)) + + @skip_if_array_api(cpu_only=True) + def test_vq_1d(self, xp): + # Test special rank 1 vq algo, python implementation. + data = X[:, 0] + initc = data[:3] + a, b = _vq.vq(data, initc) + data = xp.asarray(data) + initc = xp.asarray(initc) + ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis]) + # ta.dtype varies between int32 and int64 over platforms + xp_assert_equal(ta, xp.asarray(a, dtype=xp.int64), check_dtype=False) + xp_assert_equal(tb, xp.asarray(b)) + + @skip_if_array_api(np_only=True, reasons=['`_vq` only supports NumPy backend']) + def test__vq_sametype(self, xp): + a = xp.asarray([1.0, 2.0], dtype=xp.float64) + b = a.astype(xp.float32) + assert_raises(TypeError, _vq.vq, a, b) + + @skip_if_array_api(np_only=True, reasons=['`_vq` only supports NumPy backend']) + def test__vq_invalid_type(self, xp): + a = xp.asarray([1, 2], dtype=int) + assert_raises(TypeError, _vq.vq, a, a) + + @skip_if_array_api(cpu_only=True) + def test_vq_large_nfeat(self, xp): + X = np.random.rand(20, 20) + code_book = np.random.rand(3, 20) + + codes0, dis0 = _vq.vq(X, code_book) + codes1, dis1 = py_vq( + xp.asarray(X), xp.asarray(code_book) + ) + xp_assert_close(dis1, xp.asarray(dis0), rtol=1e-5) + # codes1.dtype varies between int32 and int64 over platforms + xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False) + + X = X.astype(np.float32) + code_book = code_book.astype(np.float32) + + codes0, dis0 = _vq.vq(X, code_book) + codes1, dis1 = py_vq( + xp.asarray(X), xp.asarray(code_book) + ) + xp_assert_close(dis1, xp.asarray(dis0, dtype=xp.float64), rtol=1e-5) + # codes1.dtype varies between int32 and int64 over platforms + xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False) + + @skip_if_array_api(cpu_only=True) + def test_vq_large_features(self, xp): + X = np.random.rand(10, 5) * 1000000 + code_book = np.random.rand(2, 5) * 1000000 + + codes0, dis0 = _vq.vq(X, code_book) + codes1, dis1 = py_vq( + xp.asarray(X), xp.asarray(code_book) + ) + xp_assert_close(dis1, xp.asarray(dis0), rtol=1e-5) + # codes1.dtype varies between int32 
and int64 over platforms + xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False) + + +# Whole class skipped on GPU for now; +# once pdist/cdist are hooked up for CuPy, more tests will work +@skip_if_array_api(cpu_only=True) +class TestKMean: + + def test_large_features(self, xp): + # Generate a data set with large values, and run kmeans on it to + # (regression for 1077). + d = 300 + n = 100 + + m1 = np.random.randn(d) + m2 = np.random.randn(d) + x = 10000 * np.random.randn(n, d) - 20000 * m1 + y = 10000 * np.random.randn(n, d) + 20000 * m2 + + data = np.empty((x.shape[0] + y.shape[0], d), np.float64) + data[:x.shape[0]] = x + data[x.shape[0]:] = y + + kmeans(xp.asarray(data), 2) + + def test_kmeans_simple(self, xp): + np.random.seed(54321) + initc = np.concatenate([[X[0]], [X[1]], [X[2]]]) + code1 = kmeans(xp.asarray(X), xp.asarray(initc), iter=1)[0] + xp_assert_close(code1, xp.asarray(CODET2)) + + @pytest.mark.skipif(SCIPY_ARRAY_API, + reason='`np.matrix` unsupported in array API mode') + def test_kmeans_simple_matrix(self, xp): + np.random.seed(54321) + initc = np.concatenate([[X[0]], [X[1]], [X[2]]]) + code1 = kmeans(matrix(X), matrix(initc), iter=1)[0] + xp_assert_close(code1, CODET2) + + def test_kmeans_lost_cluster(self, xp): + # This will cause kmeans to have a cluster with no points. + data = xp.asarray(TESTDATA_2D) + initk = xp.asarray([[-1.8127404, -0.67128041], + [2.04621601, 0.07401111], + [-2.31149087, -0.05160469]]) + + kmeans(data, initk) + with suppress_warnings() as sup: + sup.filter(UserWarning, + "One of the clusters is empty. Re-run kmeans with a " + "different initialization") + kmeans2(data, initk, missing='warn') + + assert_raises(ClusterError, kmeans2, data, initk, missing='raise') + + def test_kmeans2_simple(self, xp): + np.random.seed(12345678) + initc = xp.asarray(np.concatenate([[X[0]], [X[1]], [X[2]]])) + arrays = [xp.asarray] if SCIPY_ARRAY_API else [np.asarray, matrix] + for tp in arrays: + code1 = kmeans2(tp(X), tp(initc), iter=1)[0] + code2 = kmeans2(tp(X), tp(initc), iter=2)[0] + + xp_assert_close(code1, xp.asarray(CODET1)) + xp_assert_close(code2, xp.asarray(CODET2)) + + @pytest.mark.skipif(SCIPY_ARRAY_API, + reason='`np.matrix` unsupported in array API mode') + def test_kmeans2_simple_matrix(self, xp): + np.random.seed(12345678) + initc = xp.asarray(np.concatenate([[X[0]], [X[1]], [X[2]]])) + code1 = kmeans2(matrix(X), matrix(initc), iter=1)[0] + code2 = kmeans2(matrix(X), matrix(initc), iter=2)[0] + + xp_assert_close(code1, CODET1) + xp_assert_close(code2, CODET2) + + def test_kmeans2_rank1(self, xp): + data = xp.asarray(TESTDATA_2D) + data1 = data[:, 0] + + initc = data1[:3] + code = copy(initc, xp=xp) + kmeans2(data1, code, iter=1)[0] + kmeans2(data1, code, iter=2)[0] + + def test_kmeans2_rank1_2(self, xp): + data = xp.asarray(TESTDATA_2D) + data1 = data[:, 0] + kmeans2(data1, 2, iter=1) + + def test_kmeans2_high_dim(self, xp): + # test kmeans2 when the number of dimensions exceeds the number + # of input points + data = xp.asarray(TESTDATA_2D) + data = xp.reshape(data, (20, 20))[:10, :] + kmeans2(data, 2) + + def test_kmeans2_init(self, xp): + np.random.seed(12345) + data = xp.asarray(TESTDATA_2D) + k = 3 + + kmeans2(data, k, minit='points') + kmeans2(data[:, 1], k, minit='points') # special case (1-D) + + kmeans2(data, k, minit='++') + kmeans2(data[:, 1], k, minit='++') # special case (1-D) + + # minit='random' can give warnings, filter those + with suppress_warnings() as sup: + sup.filter(message="One of the clusters is 
empty. Re-run.") + kmeans2(data, k, minit='random') + kmeans2(data[:, 1], k, minit='random') # special case (1-D) + + @pytest.mark.skipif(sys.platform == 'win32', + reason='Fails with MemoryError in Wine.') + def test_krandinit(self, xp): + data = xp.asarray(TESTDATA_2D) + datas = [xp.reshape(data, (200, 2)), + xp.reshape(data, (20, 20))[:10, :]] + k = int(1e6) + for data in datas: + rng = np.random.default_rng(1234) + init = _krandinit(data, k, rng, xp) + orig_cov = cov(data.T) + init_cov = cov(init.T) + xp_assert_close(orig_cov, init_cov, atol=1e-2) + + def test_kmeans2_empty(self, xp): + # Regression test for gh-1032. + assert_raises(ValueError, kmeans2, xp.asarray([]), 2) + + def test_kmeans_0k(self, xp): + # Regression test for gh-1073: fail when k arg is 0. + assert_raises(ValueError, kmeans, xp.asarray(X), 0) + assert_raises(ValueError, kmeans2, xp.asarray(X), 0) + assert_raises(ValueError, kmeans2, xp.asarray(X), xp.asarray([])) + + def test_kmeans_large_thres(self, xp): + # Regression test for gh-1774 + x = xp.asarray([1, 2, 3, 4, 10], dtype=xp.float64) + res = kmeans(x, 1, thresh=1e16) + xp_assert_close(res[0], xp.asarray([4.], dtype=xp.float64)) + xp_assert_close(res[1], xp.asarray(2.3999999999999999, dtype=xp.float64)[()]) + + def test_kmeans2_kpp_low_dim(self, xp): + # Regression test for gh-11462 + prev_res = xp.asarray([[-1.95266667, 0.898], + [-3.153375, 3.3945]], dtype=xp.float64) + np.random.seed(42) + res, _ = kmeans2(xp.asarray(TESTDATA_2D), 2, minit='++') + xp_assert_close(res, prev_res) + + def test_kmeans2_kpp_high_dim(self, xp): + # Regression test for gh-11462 + n_dim = 100 + size = 10 + centers = np.vstack([5 * np.ones(n_dim), + -5 * np.ones(n_dim)]) + np.random.seed(42) + data = np.vstack([ + np.random.multivariate_normal(centers[0], np.eye(n_dim), size=size), + np.random.multivariate_normal(centers[1], np.eye(n_dim), size=size) + ]) + + data = xp.asarray(data) + res, _ = kmeans2(data, 2, minit='++') + xp_assert_equal(xp.sign(res), xp.sign(xp.asarray(centers))) + + def test_kmeans_diff_convergence(self, xp): + # Regression test for gh-8727 + obs = xp.asarray([-3, -1, 0, 1, 1, 8], dtype=xp.float64) + res = kmeans(obs, xp.asarray([-3., 0.99])) + xp_assert_close(res[0], xp.asarray([-0.4, 8.], dtype=xp.float64)) + xp_assert_close(res[1], xp.asarray(1.0666666666666667, dtype=xp.float64)[()]) + + def test_kmeans_and_kmeans2_random_seed(self, xp): + + seed_list = [ + 1234, np.random.RandomState(1234), np.random.default_rng(1234) + ] + + for seed in seed_list: + seed1 = deepcopy(seed) + seed2 = deepcopy(seed) + data = xp.asarray(TESTDATA_2D) + # test for kmeans + res1, _ = kmeans(data, 2, seed=seed1) + res2, _ = kmeans(data, 2, seed=seed2) + xp_assert_close(res1, res2, xp=xp) # should be same results + # test for kmeans2 + for minit in ["random", "points", "++"]: + res1, _ = kmeans2(data, 2, minit=minit, seed=seed1) + res2, _ = kmeans2(data, 2, minit=minit, seed=seed2) + xp_assert_close(res1, res2, xp=xp) # should be same results diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/cluster/vq.py b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/vq.py new file mode 100644 index 0000000000000000000000000000000000000000..f4b5179ce524f2dbb15939d004b5f7ff3cfa6847 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/cluster/vq.py @@ -0,0 +1,835 @@ +""" +K-means clustering and vector quantization (:mod:`scipy.cluster.vq`) +==================================================================== + +Provides routines for k-means clustering, generating code 
books +from k-means models and quantizing vectors by comparing them with +centroids in a code book. + +.. autosummary:: + :toctree: generated/ + + whiten -- Normalize a group of observations so each feature has unit variance + vq -- Calculate code book membership of a set of observation vectors + kmeans -- Perform k-means on a set of observation vectors forming k clusters + kmeans2 -- A different implementation of k-means with more methods + -- for initializing centroids + +Background information +---------------------- +The k-means algorithm takes as input the number of clusters to +generate, k, and a set of observation vectors to cluster. It +returns a set of centroids, one for each of the k clusters. An +observation vector is classified with the cluster number or +centroid index of the centroid closest to it. + +A vector v belongs to cluster i if it is closer to centroid i than +any other centroid. If v belongs to i, we say centroid i is the +dominating centroid of v. The k-means algorithm tries to +minimize distortion, which is defined as the sum of the squared distances +between each observation vector and its dominating centroid. +The minimization is achieved by iteratively reclassifying +the observations into clusters and recalculating the centroids until +a configuration is reached in which the centroids are stable. One can +also define a maximum number of iterations. + +Since vector quantization is a natural application for k-means, +information theory terminology is often used. The centroid index +or cluster index is also referred to as a "code" and the table +mapping codes to centroids and, vice versa, is often referred to as a +"code book". The result of k-means, a set of centroids, can be +used to quantize vectors. Quantization aims to find an encoding of +vectors that reduces the expected distortion. + +All routines expect obs to be an M by N array, where the rows are +the observation vectors. The codebook is a k by N array, where the +ith row is the centroid of code word i. The observation vectors +and centroids have the same feature dimension. + +As an example, suppose we wish to compress a 24-bit color image +(each pixel is represented by one byte for red, one for blue, and +one for green) before sending it over the web. By using a smaller +8-bit encoding, we can reduce the amount of data by two +thirds. Ideally, the colors for each of the 256 possible 8-bit +encoding values should be chosen to minimize distortion of the +color. Running k-means with k=256 generates a code book of 256 +codes, which fills up all possible 8-bit sequences. Instead of +sending a 3-byte value for each pixel, the 8-bit centroid index +(or code word) of the dominating centroid is transmitted. The code +book is also sent over the wire so each 8-bit code can be +translated back to a 24-bit pixel value representation. If the +image of interest was of an ocean, we would expect many 24-bit +blues to be represented by 8-bit codes. If it was an image of a +human face, more flesh-tone colors would be represented in the +code book. + +""" +import warnings +import numpy as np +from collections import deque +from scipy._lib._array_api import ( + _asarray, array_namespace, size, atleast_nd, copy, cov +) +from scipy._lib._util import check_random_state, rng_integers +from scipy.spatial.distance import cdist + +from . 
import _vq + +__docformat__ = 'restructuredtext' + +__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2'] + + +class ClusterError(Exception): + pass + + +def whiten(obs, check_finite=True): + """ + Normalize a group of observations on a per feature basis. + + Before running k-means, it is beneficial to rescale each feature + dimension of the observation set by its standard deviation (i.e. "whiten" + it - as in "white noise" where each frequency has equal power). + Each feature is divided by its standard deviation across all observations + to give it unit variance. + + Parameters + ---------- + obs : ndarray + Each row of the array is an observation. The + columns are the features seen during each observation. + + >>> # f0 f1 f2 + >>> obs = [[ 1., 1., 1.], #o0 + ... [ 2., 2., 2.], #o1 + ... [ 3., 3., 3.], #o2 + ... [ 4., 4., 4.]] #o3 + + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + + Returns + ------- + result : ndarray + Contains the values in `obs` scaled by the standard deviation + of each column. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster.vq import whiten + >>> features = np.array([[1.9, 2.3, 1.7], + ... [1.5, 2.5, 2.2], + ... [0.8, 0.6, 1.7,]]) + >>> whiten(features) + array([[ 4.17944278, 2.69811351, 7.21248917], + [ 3.29956009, 2.93273208, 9.33380951], + [ 1.75976538, 0.7038557 , 7.21248917]]) + + """ + xp = array_namespace(obs) + obs = _asarray(obs, check_finite=check_finite, xp=xp) + std_dev = xp.std(obs, axis=0) + zero_std_mask = std_dev == 0 + if xp.any(zero_std_mask): + std_dev[zero_std_mask] = 1.0 + warnings.warn("Some columns have standard deviation zero. " + "The values of these columns will not change.", + RuntimeWarning, stacklevel=2) + return obs / std_dev + + +def vq(obs, code_book, check_finite=True): + """ + Assign codes from a code book to observations. + + Assigns a code from a code book to each observation. Each + observation vector in the 'M' by 'N' `obs` array is compared with the + centroids in the code book and assigned the code of the closest + centroid. + + The features in `obs` should have unit variance, which can be + achieved by passing them through the whiten function. The code + book can be created with the k-means algorithm or a different + encoding algorithm. + + Parameters + ---------- + obs : ndarray + Each row of the 'M' x 'N' array is an observation. The columns are + the "features" seen during each observation. The features must be + whitened first using the whiten function or something equivalent. + code_book : ndarray + The code book is usually generated using the k-means algorithm. + Each row of the array holds a different code, and the columns are + the features of the code. + + >>> # f0 f1 f2 f3 + >>> code_book = [ + ... [ 1., 2., 3., 4.], #c0 + ... [ 1., 2., 3., 4.], #c1 + ... [ 1., 2., 3., 4.]] #c2 + + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + + Returns + ------- + code : ndarray + A length M array holding the code book index for each observation. + dist : ndarray + The distortion (distance) between the observation and its nearest + code. 
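+
+    Notes
+    -----
+    When the common result dtype of `obs` and `code_book` is a real
+    floating-point type, the compiled ``_vq.vq`` kernel is used; other
+    dtypes fall back to the pure-Python `py_vq` implementation (see the
+    dispatch in the function body below).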
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster.vq import vq
+    >>> code_book = np.array([[1., 1., 1.],
+    ...                       [2., 2., 2.]])
+    >>> features = np.array([[1.9, 2.3, 1.7],
+    ...                      [1.5, 2.5, 2.2],
+    ...                      [0.8, 0.6, 1.7]])
+    >>> vq(features, code_book)
+    (array([1, 1, 0], dtype=int32), array([0.43588989, 0.73484692, 0.83066239]))
+
+    """
+    xp = array_namespace(obs, code_book)
+    obs = _asarray(obs, xp=xp, check_finite=check_finite)
+    code_book = _asarray(code_book, xp=xp, check_finite=check_finite)
+    ct = xp.result_type(obs, code_book)
+
+    c_obs = xp.astype(obs, ct, copy=False)
+    c_code_book = xp.astype(code_book, ct, copy=False)
+
+    if xp.isdtype(ct, kind='real floating'):
+        c_obs = np.asarray(c_obs)
+        c_code_book = np.asarray(c_code_book)
+        result = _vq.vq(c_obs, c_code_book)
+        return xp.asarray(result[0]), xp.asarray(result[1])
+    return py_vq(obs, code_book, check_finite=False)
+
+
+def py_vq(obs, code_book, check_finite=True):
+    """ Python version of vq algorithm.
+
+    The algorithm computes the Euclidean distance between each
+    observation and every centroid in the code_book.
+
+    Parameters
+    ----------
+    obs : ndarray
+        Expects a rank 2 array. Each row is one observation.
+    code_book : ndarray
+        Code book to use. Same format as obs. Should have the same number
+        of features (e.g., columns) as obs.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default: True
+
+    Returns
+    -------
+    code : ndarray
+        code[i] gives the label of the ith observation; its code is
+        code_book[code[i]].
+    min_dist : ndarray
+        min_dist[i] gives the distance between the ith observation and its
+        corresponding code.
+
+    Notes
+    -----
+    This function is slower than the C version but works for
+    all input types. If the inputs have the wrong types for the
+    C versions of the function, this one is called as a last resort.
+
+    It is about 20 times slower than the C version.
+
+    """
+    xp = array_namespace(obs, code_book)
+    obs = _asarray(obs, xp=xp, check_finite=check_finite)
+    code_book = _asarray(code_book, xp=xp, check_finite=check_finite)
+
+    if obs.ndim != code_book.ndim:
+        raise ValueError("Observation and code_book should have the same rank")
+
+    if obs.ndim == 1:
+        obs = obs[:, xp.newaxis]
+        code_book = code_book[:, xp.newaxis]
+
+    # Once `cdist` has array API support, this `xp.asarray` call can be removed
+    dist = xp.asarray(cdist(obs, code_book))
+    code = xp.argmin(dist, axis=1)
+    min_dist = xp.min(dist, axis=1)
+    return code, min_dist
+
+
+def _kmeans(obs, guess, thresh=1e-5, xp=None):
+    """ "raw" version of k-means.
+
+    Returns
+    -------
+    code_book
+        The lowest distortion codebook found.
+    avg_dist
+        The average distance an observation is from a code in the book.
+        Lower means the code_book matches the data better.
+
+    See Also
+    --------
+    kmeans : wrapper around k-means
+
+    Examples
+    --------
+    Note: not whitened in this example.
+
+    >>> import numpy as np
+    >>> from scipy.cluster.vq import _kmeans
+    >>> features = np.array([[ 1.9,2.3],
+    ...                      [ 1.5,2.5],
+    ...                      [ 0.8,0.6],
+    ...                      [ 0.4,1.8],
+    ...
[ 1.0,1.0]]) + >>> book = np.array((features[0],features[2])) + >>> _kmeans(features,book) + (array([[ 1.7 , 2.4 ], + [ 0.73333333, 1.13333333]]), 0.40563916697728591) + + """ + xp = np if xp is None else xp + code_book = guess + diff = xp.inf + prev_avg_dists = deque([diff], maxlen=2) + while diff > thresh: + # compute membership and distances between obs and code_book + obs_code, distort = vq(obs, code_book, check_finite=False) + prev_avg_dists.append(xp.mean(distort, axis=-1)) + # recalc code_book as centroids of associated obs + obs = np.asarray(obs) + obs_code = np.asarray(obs_code) + code_book, has_members = _vq.update_cluster_means(obs, obs_code, + code_book.shape[0]) + obs = xp.asarray(obs) + obs_code = xp.asarray(obs_code) + code_book = xp.asarray(code_book) + has_members = xp.asarray(has_members) + code_book = code_book[has_members] + diff = xp.abs(prev_avg_dists[0] - prev_avg_dists[1]) + + return code_book, prev_avg_dists[1] + + +def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True, + *, seed=None): + """ + Performs k-means on a set of observation vectors forming k clusters. + + The k-means algorithm adjusts the classification of the observations + into clusters and updates the cluster centroids until the position of + the centroids is stable over successive iterations. In this + implementation of the algorithm, the stability of the centroids is + determined by comparing the absolute value of the change in the average + Euclidean distance between the observations and their corresponding + centroids against a threshold. This yields + a code book mapping centroids to codes and vice versa. + + Parameters + ---------- + obs : ndarray + Each row of the M by N array is an observation vector. The + columns are the features seen during each observation. + The features must be whitened first with the `whiten` function. + + k_or_guess : int or ndarray + The number of centroids to generate. A code is assigned to + each centroid, which is also the row index of the centroid + in the code_book matrix generated. + + The initial k centroids are chosen by randomly selecting + observations from the observation matrix. Alternatively, + passing a k by N array specifies the initial k centroids. + + iter : int, optional + The number of times to run k-means, returning the codebook + with the lowest distortion. This argument is ignored if + initial centroids are specified with an array for the + ``k_or_guess`` parameter. This parameter does not represent the + number of iterations of the k-means algorithm. + + thresh : float, optional + Terminates the k-means algorithm if the change in + distortion since the last k-means iteration is less than + or equal to threshold. + + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + Seed for initializing the pseudo-random number generator. + If `seed` is None (or `numpy.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + The default is None. + + Returns + ------- + codebook : ndarray + A k by N array of k centroids. 
The ith centroid + codebook[i] is represented with the code i. The centroids + and codes generated represent the lowest distortion seen, + not necessarily the globally minimal distortion. + Note that the number of centroids is not necessarily the same as the + ``k_or_guess`` parameter, because centroids assigned to no observations + are removed during iterations. + + distortion : float + The mean (non-squared) Euclidean distance between the observations + passed and the centroids generated. Note the difference to the standard + definition of distortion in the context of the k-means algorithm, which + is the sum of the squared distances. + + See Also + -------- + kmeans2 : a different implementation of k-means clustering + with more methods for generating initial centroids but without + using a distortion change threshold as a stopping criterion. + + whiten : must be called prior to passing an observation matrix + to kmeans. + + Notes + ----- + For more functionalities or optimal performance, you can use + `sklearn.cluster.KMeans `_. + `This `_ + is a benchmark result of several implementations. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster.vq import vq, kmeans, whiten + >>> import matplotlib.pyplot as plt + >>> features = np.array([[ 1.9,2.3], + ... [ 1.5,2.5], + ... [ 0.8,0.6], + ... [ 0.4,1.8], + ... [ 0.1,0.1], + ... [ 0.2,1.8], + ... [ 2.0,0.5], + ... [ 0.3,1.5], + ... [ 1.0,1.0]]) + >>> whitened = whiten(features) + >>> book = np.array((whitened[0],whitened[2])) + >>> kmeans(whitened,book) + (array([[ 2.3110306 , 2.86287398], # random + [ 0.93218041, 1.24398691]]), 0.85684700941625547) + + >>> codes = 3 + >>> kmeans(whitened,codes) + (array([[ 2.3110306 , 2.86287398], # random + [ 1.32544402, 0.65607529], + [ 0.40782893, 2.02786907]]), 0.5196582527686241) + + >>> # Create 50 datapoints in two clusters a and b + >>> pts = 50 + >>> rng = np.random.default_rng() + >>> a = rng.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts) + >>> b = rng.multivariate_normal([30, 10], + ... [[10, 2], [2, 1]], + ... size=pts) + >>> features = np.concatenate((a, b)) + >>> # Whiten data + >>> whitened = whiten(features) + >>> # Find 2 clusters in the data + >>> codebook, distortion = kmeans(whitened, 2) + >>> # Plot whitened data and cluster centers in red + >>> plt.scatter(whitened[:, 0], whitened[:, 1]) + >>> plt.scatter(codebook[:, 0], codebook[:, 1], c='r') + >>> plt.show() + + """ + if isinstance(k_or_guess, int): + xp = array_namespace(obs) + else: + xp = array_namespace(obs, k_or_guess) + obs = _asarray(obs, xp=xp, check_finite=check_finite) + guess = _asarray(k_or_guess, xp=xp, check_finite=check_finite) + if iter < 1: + raise ValueError("iter must be at least 1, got %s" % iter) + + # Determine whether a count (scalar) or an initial guess (array) was passed. + if size(guess) != 1: + if size(guess) < 1: + raise ValueError("Asked for 0 clusters. Initial book was %s" % + guess) + return _kmeans(obs, guess, thresh=thresh, xp=xp) + + # k_or_guess is a scalar, now verify that it's an integer + k = int(guess) + if k != guess: + raise ValueError("If k_or_guess is a scalar, it must be an integer.") + if k < 1: + raise ValueError("Asked for %d clusters." 
% k) + + rng = check_random_state(seed) + + # initialize best distance value to a large value + best_dist = xp.inf + for i in range(iter): + # the initial code book is randomly selected from observations + guess = _kpoints(obs, k, rng, xp) + book, dist = _kmeans(obs, guess, thresh=thresh, xp=xp) + if dist < best_dist: + best_book = book + best_dist = dist + return best_book, best_dist + + +def _kpoints(data, k, rng, xp): + """Pick k points at random in data (one row = one observation). + + Parameters + ---------- + data : ndarray + Expect a rank 1 or 2 array. Rank 1 are assumed to describe one + dimensional data, rank 2 multidimensional data, in which case one + row is one observation. + k : int + Number of samples to generate. + rng : `numpy.random.Generator` or `numpy.random.RandomState` + Random number generator. + + Returns + ------- + x : ndarray + A 'k' by 'N' containing the initial centroids + + """ + idx = rng.choice(data.shape[0], size=int(k), replace=False) + # convert to array with default integer dtype (avoids numpy#25607) + idx = xp.asarray(idx, dtype=xp.asarray([1]).dtype) + return xp.take(data, idx, axis=0) + + +def _krandinit(data, k, rng, xp): + """Returns k samples of a random variable whose parameters depend on data. + + More precisely, it returns k observations sampled from a Gaussian random + variable whose mean and covariances are the ones estimated from the data. + + Parameters + ---------- + data : ndarray + Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D + data, rank 2 multidimensional data, in which case one + row is one observation. + k : int + Number of samples to generate. + rng : `numpy.random.Generator` or `numpy.random.RandomState` + Random number generator. + + Returns + ------- + x : ndarray + A 'k' by 'N' containing the initial centroids + + """ + mu = xp.mean(data, axis=0) + k = np.asarray(k) + + if data.ndim == 1: + _cov = cov(data) + x = rng.standard_normal(size=k) + x = xp.asarray(x) + x *= xp.sqrt(_cov) + elif data.shape[1] > data.shape[0]: + # initialize when the covariance matrix is rank deficient + _, s, vh = xp.linalg.svd(data - mu, full_matrices=False) + x = rng.standard_normal(size=(k, size(s))) + x = xp.asarray(x) + sVh = s[:, None] * vh / xp.sqrt(data.shape[0] - xp.asarray(1.)) + x = x @ sVh + else: + _cov = atleast_nd(cov(data.T), ndim=2) + + # k rows, d cols (one row = one obs) + # Generate k sample of a random variable ~ Gaussian(mu, cov) + x = rng.standard_normal(size=(k, size(mu))) + x = xp.asarray(x) + x = x @ xp.linalg.cholesky(_cov).T + + x += mu + return x + + +def _kpp(data, k, rng, xp): + """ Picks k points in the data based on the kmeans++ method. + + Parameters + ---------- + data : ndarray + Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D + data, rank 2 multidimensional data, in which case one + row is one observation. + k : int + Number of samples to generate. + rng : `numpy.random.Generator` or `numpy.random.RandomState` + Random number generator. + + Returns + ------- + init : ndarray + A 'k' by 'N' containing the initial centroids. + + References + ---------- + .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of + careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium + on Discrete Algorithms, 2007. 
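+
+    Notes
+    -----
+    After the first centroid is drawn uniformly at random from `data`, each
+    subsequent centroid is drawn with probability proportional to the
+    squared distance from each point to its nearest already-chosen centroid,
+
+    .. math:: P(x) = \frac{D(x)^2}{\sum_{x'} D(x')^2},
+
+    which is the ``D2 / D2.sum()`` weighting implemented below.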
+ """ + + ndim = len(data.shape) + if ndim == 1: + data = data[:, None] + + dims = data.shape[1] + + init = xp.empty((int(k), dims)) + + for i in range(k): + if i == 0: + init[i, :] = data[rng_integers(rng, data.shape[0]), :] + + else: + D2 = cdist(init[:i,:], data, metric='sqeuclidean').min(axis=0) + probs = D2/D2.sum() + cumprobs = probs.cumsum() + r = rng.uniform() + cumprobs = np.asarray(cumprobs) + init[i, :] = data[np.searchsorted(cumprobs, r), :] + + if ndim == 1: + init = init[:, 0] + return init + + +_valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp} + + +def _missing_warn(): + """Print a warning when called.""" + warnings.warn("One of the clusters is empty. " + "Re-run kmeans with a different initialization.", + stacklevel=3) + + +def _missing_raise(): + """Raise a ClusterError when called.""" + raise ClusterError("One of the clusters is empty. " + "Re-run kmeans with a different initialization.") + + +_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise} + + +def kmeans2(data, k, iter=10, thresh=1e-5, minit='random', + missing='warn', check_finite=True, *, seed=None): + """ + Classify a set of observations into k clusters using the k-means algorithm. + + The algorithm attempts to minimize the Euclidean distance between + observations and centroids. Several initialization methods are + included. + + Parameters + ---------- + data : ndarray + A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length + 'M' array of 'M' 1-D observations. + k : int or ndarray + The number of clusters to form as well as the number of + centroids to generate. If `minit` initialization string is + 'matrix', or if a ndarray is given instead, it is + interpreted as initial cluster to use instead. + iter : int, optional + Number of iterations of the k-means algorithm to run. Note + that this differs in meaning from the iters parameter to + the kmeans function. + thresh : float, optional + (not used yet) + minit : str, optional + Method for initialization. Available methods are 'random', + 'points', '++' and 'matrix': + + 'random': generate k centroids from a Gaussian with mean and + variance estimated from the data. + + 'points': choose k observations (rows) at random from data for + the initial centroids. + + '++': choose k observations accordingly to the kmeans++ method + (careful seeding) + + 'matrix': interpret the k parameter as a k by M (or length k + array for 1-D data) array of initial centroids. + missing : str, optional + Method to deal with empty clusters. Available methods are + 'warn' and 'raise': + + 'warn': give a warning and continue. + + 'raise': raise an ClusterError and terminate the algorithm. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + Seed for initializing the pseudo-random number generator. + If `seed` is None (or `numpy.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + The default is None. + + Returns + ------- + centroid : ndarray + A 'k' by 'N' array of centroids found at the last iteration of + k-means. 
+ label : ndarray + label[i] is the code or index of the centroid the + ith observation is closest to. + + See Also + -------- + kmeans + + References + ---------- + .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of + careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium + on Discrete Algorithms, 2007. + + Examples + -------- + >>> from scipy.cluster.vq import kmeans2 + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + Create z, an array with shape (100, 2) containing a mixture of samples + from three multivariate normal distributions. + + >>> rng = np.random.default_rng() + >>> a = rng.multivariate_normal([0, 6], [[2, 1], [1, 1.5]], size=45) + >>> b = rng.multivariate_normal([2, 0], [[1, -1], [-1, 3]], size=30) + >>> c = rng.multivariate_normal([6, 4], [[5, 0], [0, 1.2]], size=25) + >>> z = np.concatenate((a, b, c)) + >>> rng.shuffle(z) + + Compute three clusters. + + >>> centroid, label = kmeans2(z, 3, minit='points') + >>> centroid + array([[ 2.22274463, -0.61666946], # may vary + [ 0.54069047, 5.86541444], + [ 6.73846769, 4.01991898]]) + + How many points are in each cluster? + + >>> counts = np.bincount(label) + >>> counts + array([29, 51, 20]) # may vary + + Plot the clusters. + + >>> w0 = z[label == 0] + >>> w1 = z[label == 1] + >>> w2 = z[label == 2] + >>> plt.plot(w0[:, 0], w0[:, 1], 'o', alpha=0.5, label='cluster 0') + >>> plt.plot(w1[:, 0], w1[:, 1], 'd', alpha=0.5, label='cluster 1') + >>> plt.plot(w2[:, 0], w2[:, 1], 's', alpha=0.5, label='cluster 2') + >>> plt.plot(centroid[:, 0], centroid[:, 1], 'k*', label='centroids') + >>> plt.axis('equal') + >>> plt.legend(shadow=True) + >>> plt.show() + + """ + if int(iter) < 1: + raise ValueError("Invalid iter (%s), " + "must be a positive integer." 
% iter) + try: + miss_meth = _valid_miss_meth[missing] + except KeyError as e: + raise ValueError(f"Unknown missing method {missing!r}") from e + + if isinstance(k, int): + xp = array_namespace(data) + else: + xp = array_namespace(data, k) + data = _asarray(data, xp=xp, check_finite=check_finite) + code_book = copy(k, xp=xp) + if data.ndim == 1: + d = 1 + elif data.ndim == 2: + d = data.shape[1] + else: + raise ValueError("Input of rank > 2 is not supported.") + + if size(data) < 1 or size(code_book) < 1: + raise ValueError("Empty input is not supported.") + + # If k is not a single value, it should be compatible with data's shape + if minit == 'matrix' or size(code_book) > 1: + if data.ndim != code_book.ndim: + raise ValueError("k array doesn't match data rank") + nc = code_book.shape[0] + if data.ndim > 1 and code_book.shape[1] != d: + raise ValueError("k array doesn't match data dimension") + else: + nc = int(code_book) + + if nc < 1: + raise ValueError("Cannot ask kmeans2 for %d clusters" + " (k was %s)" % (nc, code_book)) + elif nc != code_book: + warnings.warn("k was not an integer, was converted.", stacklevel=2) + + try: + init_meth = _valid_init_meth[minit] + except KeyError as e: + raise ValueError(f"Unknown init method {minit!r}") from e + else: + rng = check_random_state(seed) + code_book = init_meth(data, code_book, rng, xp) + + data = np.asarray(data) + code_book = np.asarray(code_book) + for i in range(iter): + # Compute the nearest neighbor for each obs using the current code book + label = vq(data, code_book, check_finite=check_finite)[0] + # Update the code book by computing centroids + new_code_book, has_members = _vq.update_cluster_means(data, label, nc) + if not has_members.all(): + miss_meth() + # Set the empty clusters to their previous positions + new_code_book[~has_members] = code_book[~has_members] + code_book = new_code_book + + return xp.asarray(code_book), xp.asarray(label) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..10f4b39e48e2d6c0b042582ca65f572bde6ba575 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__init__.py @@ -0,0 +1,103 @@ +""" +========================================================= +Legacy discrete Fourier transforms (:mod:`scipy.fftpack`) +========================================================= + +.. legacy:: + + New code should use :mod:`scipy.fft`. + +Fast Fourier Transforms (FFTs) +============================== + +.. autosummary:: + :toctree: generated/ + + fft - Fast (discrete) Fourier Transform (FFT) + ifft - Inverse FFT + fft2 - 2-D FFT + ifft2 - 2-D inverse FFT + fftn - N-D FFT + ifftn - N-D inverse FFT + rfft - FFT of strictly real-valued sequence + irfft - Inverse of rfft + dct - Discrete cosine transform + idct - Inverse discrete cosine transform + dctn - N-D Discrete cosine transform + idctn - N-D Inverse discrete cosine transform + dst - Discrete sine transform + idst - Inverse discrete sine transform + dstn - N-D Discrete sine transform + idstn - N-D Inverse discrete sine transform + +Differential and pseudo-differential operators +============================================== + +.. 
autosummary:: + :toctree: generated/ + + diff - Differentiation and integration of periodic sequences + tilbert - Tilbert transform: cs_diff(x,h,h) + itilbert - Inverse Tilbert transform: sc_diff(x,h,h) + hilbert - Hilbert transform: cs_diff(x,inf,inf) + ihilbert - Inverse Hilbert transform: sc_diff(x,inf,inf) + cs_diff - cosh/sinh pseudo-derivative of periodic sequences + sc_diff - sinh/cosh pseudo-derivative of periodic sequences + ss_diff - sinh/sinh pseudo-derivative of periodic sequences + cc_diff - cosh/cosh pseudo-derivative of periodic sequences + shift - Shift periodic sequences + +Helper functions +================ + +.. autosummary:: + :toctree: generated/ + + fftshift - Shift the zero-frequency component to the center of the spectrum + ifftshift - The inverse of `fftshift` + fftfreq - Return the Discrete Fourier Transform sample frequencies + rfftfreq - DFT sample frequencies (for usage with rfft, irfft) + next_fast_len - Find the optimal length to zero-pad an FFT for speed + +Note that ``fftshift``, ``ifftshift`` and ``fftfreq`` are numpy functions +exposed by ``fftpack``; importing them from ``numpy`` should be preferred. + +Convolutions (:mod:`scipy.fftpack.convolve`) +============================================ + +.. module:: scipy.fftpack.convolve + +.. autosummary:: + :toctree: generated/ + + convolve + convolve_z + init_convolution_kernel + destroy_convolve_cache + +""" + + +__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft', + 'fft2','ifft2', + 'diff', + 'tilbert','itilbert','hilbert','ihilbert', + 'sc_diff','cs_diff','cc_diff','ss_diff', + 'shift', + 'fftfreq', 'rfftfreq', + 'fftshift', 'ifftshift', + 'next_fast_len', + 'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn' + ] + +from ._basic import * +from ._pseudo_diffs import * +from ._helper import * +from ._realtransforms import * + +# Deprecated namespaces, to be removed in v2.0.0 +from . 
import basic, helper, pseudo_diffs, realtransforms + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89bb6c24694795c777cd36af6041f5a8a2799449 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_basic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e5d021602d8c6b12555713d9a7cfe487a3a5971 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_basic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_helper.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26e5a1d022e92ee659f29b0853dcdada06991c74 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_helper.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_pseudo_diffs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_pseudo_diffs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e87452a4203bd7de54a89407da3e0d37c6e793e8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_pseudo_diffs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_realtransforms.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_realtransforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1287a22619e093082fd9feaef2d637080efdc522 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_realtransforms.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/basic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ce0af623b2eb626d8e0c12943b6d587de385ada Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/basic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/helper.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6373487aa3e8527b248731bbf421dd55a40420f1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/helper.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/pseudo_diffs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/pseudo_diffs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f010b73f6628204bd9b77a443bef1f331ef3d21 Binary files 
/dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/pseudo_diffs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/realtransforms.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/realtransforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3733b89e1cd821d2c7766d528a5b237c40677cdb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/realtransforms.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/_basic.py b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..59c85ae4b364464a66489ef221f7f7ac45624694 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/_basic.py @@ -0,0 +1,428 @@ +""" +Discrete Fourier Transforms - _basic.py +""" +# Created by Pearu Peterson, August,September 2002 +__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft', + 'fft2','ifft2'] + +from scipy.fft import _pocketfft +from ._helper import _good_shape + + +def fft(x, n=None, axis=-1, overwrite_x=False): + """ + Return discrete Fourier transform of real or complex sequence. + + The returned complex array contains ``y(0), y(1),..., y(n-1)``, where + + ``y(j) = (x * exp(-2*pi*sqrt(-1)*j*np.arange(n)/n)).sum()``. + + Parameters + ---------- + x : array_like + Array to Fourier transform. + n : int, optional + Length of the Fourier transform. If ``n < x.shape[axis]``, `x` is + truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The + default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the fft's are computed; the default is over the + last axis (i.e., ``axis=-1``). + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + z : complex ndarray + with the elements:: + + [y(0),y(1),..,y(n/2),y(1-n/2),...,y(-1)] if n is even + [y(0),y(1),..,y((n-1)/2),y(-(n-1)/2),...,y(-1)] if n is odd + + where:: + + y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k* 2*pi/n), j = 0..n-1 + + See Also + -------- + ifft : Inverse FFT + rfft : FFT of a real sequence + + Notes + ----- + The packing of the result is "standard": If ``A = fft(a, n)``, then + ``A[0]`` contains the zero-frequency term, ``A[1:n/2]`` contains the + positive-frequency terms, and ``A[n/2:]`` contains the negative-frequency + terms, in order of decreasingly negative frequency. So ,for an 8-point + transform, the frequencies of the result are [0, 1, 2, 3, -4, -3, -2, -1]. + To rearrange the fft output so that the zero-frequency component is + centered, like [-4, -3, -2, -1, 0, 1, 2, 3], use `fftshift`. + + Both single and double precision routines are implemented. Half precision + inputs will be converted to single precision. Non-floating-point inputs + will be converted to double precision. Long-double precision inputs are + not supported. + + This function is most efficient when `n` is a power of two, and least + efficient when `n` is prime. + + Note that if ``x`` is real-valued, then ``A[j] == A[n-j].conjugate()``. + If ``x`` is real-valued and ``n`` is even, then ``A[n/2]`` is real. + + If the data type of `x` is real, a "real FFT" algorithm is automatically + used, which roughly halves the computation time. 
To increase efficiency + a little further, use `rfft`, which does the same calculation, but only + outputs half of the symmetrical spectrum. If the data is both real and + symmetrical, the `dct` can again double the efficiency by generating + half of the spectrum from half of the signal. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fftpack import fft, ifft + >>> x = np.arange(5) + >>> np.allclose(fft(ifft(x)), x, atol=1e-15) # within numerical accuracy. + True + + """ + return _pocketfft.fft(x, n, axis, None, overwrite_x) + + +def ifft(x, n=None, axis=-1, overwrite_x=False): + """ + Return discrete inverse Fourier transform of real or complex sequence. + + The returned complex array contains ``y(0), y(1),..., y(n-1)``, where + + ``y(j) = (x * exp(2*pi*sqrt(-1)*j*np.arange(n)/n)).mean()``. + + Parameters + ---------- + x : array_like + Transformed data to invert. + n : int, optional + Length of the inverse Fourier transform. If ``n < x.shape[axis]``, + `x` is truncated. If ``n > x.shape[axis]``, `x` is zero-padded. + The default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the ifft's are computed; the default is over the + last axis (i.e., ``axis=-1``). + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + ifft : ndarray of floats + The inverse discrete Fourier transform. + + See Also + -------- + fft : Forward FFT + + Notes + ----- + Both single and double precision routines are implemented. Half precision + inputs will be converted to single precision. Non-floating-point inputs + will be converted to double precision. Long-double precision inputs are + not supported. + + This function is most efficient when `n` is a power of two, and least + efficient when `n` is prime. + + If the data type of `x` is real, a "real IFFT" algorithm is automatically + used, which roughly halves the computation time. + + Examples + -------- + >>> from scipy.fftpack import fft, ifft + >>> import numpy as np + >>> x = np.arange(5) + >>> np.allclose(ifft(fft(x)), x, atol=1e-15) # within numerical accuracy. + True + + """ + return _pocketfft.ifft(x, n, axis, None, overwrite_x) + + +def rfft(x, n=None, axis=-1, overwrite_x=False): + """ + Discrete Fourier transform of a real sequence. + + Parameters + ---------- + x : array_like, real-valued + The data to transform. + n : int, optional + Defines the length of the Fourier transform. If `n` is not specified + (the default) then ``n = x.shape[axis]``. If ``n < x.shape[axis]``, + `x` is truncated, if ``n > x.shape[axis]``, `x` is zero-padded. + axis : int, optional + The axis along which the transform is applied. The default is the + last axis. + overwrite_x : bool, optional + If set to true, the contents of `x` can be overwritten. Default is + False. + + Returns + ------- + z : real ndarray + The returned real array contains:: + + [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))] if n is even + [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))] if n is odd + + where:: + + y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k*2*pi/n) + j = 0..n-1 + + See Also + -------- + fft, irfft, scipy.fft.rfft + + Notes + ----- + Within numerical accuracy, ``y == rfft(irfft(y))``. + + Both single and double precision routines are implemented. Half precision + inputs will be converted to single precision. Non-floating-point inputs + will be converted to double precision. Long-double precision inputs are + not supported. 
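    As a small sketch of the packed layout (the comparison against
    ``numpy.fft.rfft`` is only illustrative), the interleaved real and
    imaginary values can be reassembled into the complex half-spectrum:

    >>> import numpy as np
    >>> from scipy.fftpack import rfft
    >>> x = np.array([1., 2., 3., 4.])
    >>> packed = rfft(x)  # [y(0), Re(y(1)), Im(y(1)), Re(y(2))]
    >>> y = np.array([packed[0], packed[1] + 1j*packed[2], packed[3]])
    >>> np.allclose(y, np.fft.rfft(x))
    True
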
+ + To get an output with a complex datatype, consider using the newer + function `scipy.fft.rfft`. + + Examples + -------- + >>> from scipy.fftpack import fft, rfft + >>> a = [9, -9, 1, 3] + >>> fft(a) + array([ 4. +0.j, 8.+12.j, 16. +0.j, 8.-12.j]) + >>> rfft(a) + array([ 4., 8., 12., 16.]) + + """ + return _pocketfft.rfft_fftpack(x, n, axis, None, overwrite_x) + + +def irfft(x, n=None, axis=-1, overwrite_x=False): + """ + Return inverse discrete Fourier transform of real sequence x. + + The contents of `x` are interpreted as the output of the `rfft` + function. + + Parameters + ---------- + x : array_like + Transformed data to invert. + n : int, optional + Length of the inverse Fourier transform. + If n < x.shape[axis], x is truncated. + If n > x.shape[axis], x is zero-padded. + The default results in n = x.shape[axis]. + axis : int, optional + Axis along which the ifft's are computed; the default is over + the last axis (i.e., axis=-1). + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + irfft : ndarray of floats + The inverse discrete Fourier transform. + + See Also + -------- + rfft, ifft, scipy.fft.irfft + + Notes + ----- + The returned real array contains:: + + [y(0),y(1),...,y(n-1)] + + where for n is even:: + + y(j) = 1/n (sum[k=1..n/2-1] (x[2*k-1]+sqrt(-1)*x[2*k]) + * exp(sqrt(-1)*j*k* 2*pi/n) + + c.c. + x[0] + (-1)**(j) x[n-1]) + + and for n is odd:: + + y(j) = 1/n (sum[k=1..(n-1)/2] (x[2*k-1]+sqrt(-1)*x[2*k]) + * exp(sqrt(-1)*j*k* 2*pi/n) + + c.c. + x[0]) + + c.c. denotes complex conjugate of preceding expression. + + For details on input parameters, see `rfft`. + + To process (conjugate-symmetric) frequency-domain data with a complex + datatype, consider using the newer function `scipy.fft.irfft`. + + Examples + -------- + >>> from scipy.fftpack import rfft, irfft + >>> a = [1.0, 2.0, 3.0, 4.0, 5.0] + >>> irfft(a) + array([ 2.6 , -3.16405192, 1.24398433, -1.14955713, 1.46962473]) + >>> irfft(rfft(a)) + array([1., 2., 3., 4., 5.]) + + """ + return _pocketfft.irfft_fftpack(x, n, axis, None, overwrite_x) + + +def fftn(x, shape=None, axes=None, overwrite_x=False): + """ + Return multidimensional discrete Fourier transform. + + The returned array contains:: + + y[j_1,..,j_d] = sum[k_1=0..n_1-1, ..., k_d=0..n_d-1] + x[k_1,..,k_d] * prod[i=1..d] exp(-sqrt(-1)*2*pi/n_i * j_i * k_i) + + where d = len(x.shape) and n = x.shape. + + Parameters + ---------- + x : array_like + The (N-D) array to transform. + shape : int or array_like of ints or None, optional + The shape of the result. If both `shape` and `axes` (see below) are + None, `shape` is ``x.shape``; if `shape` is None but `axes` is + not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``. + If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros. + If ``shape[i] < x.shape[i]``, the ith dimension is truncated to + length ``shape[i]``. + If any element of `shape` is -1, the size of the corresponding + dimension of `x` is used. + axes : int or array_like of ints or None, optional + The axes of `x` (`y` if `shape` is not None) along which the + transform is applied. + The default is over all axes. + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed. Default is False. + + Returns + ------- + y : complex-valued N-D NumPy array + The (N-D) DFT of the input array. + + See Also + -------- + ifftn + + Notes + ----- + If ``x`` is real-valued, then + ``y[..., j_i, ...] == y[..., n_i-j_i, ...].conjugate()``. 
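    A quick numerical check of this symmetry (a sketch, using an arbitrary
    real input):

    >>> import numpy as np
    >>> from scipy.fftpack import fftn
    >>> x = np.arange(12.).reshape(3, 4)
    >>> y = fftn(x)
    >>> np.allclose(y[1, 2], np.conj(y[-1, -2]))
    True
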
+ + Both single and double precision routines are implemented. Half precision + inputs will be converted to single precision. Non-floating-point inputs + will be converted to double precision. Long-double precision inputs are + not supported. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fftpack import fftn, ifftn + >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16)) + >>> np.allclose(y, fftn(ifftn(y))) + True + + """ + shape = _good_shape(x, shape, axes) + return _pocketfft.fftn(x, shape, axes, None, overwrite_x) + + +def ifftn(x, shape=None, axes=None, overwrite_x=False): + """ + Return inverse multidimensional discrete Fourier transform. + + The sequence can be of an arbitrary type. + + The returned array contains:: + + y[j_1,..,j_d] = 1/p * sum[k_1=0..n_1-1, ..., k_d=0..n_d-1] + x[k_1,..,k_d] * prod[i=1..d] exp(sqrt(-1)*2*pi/n_i * j_i * k_i) + + where ``d = len(x.shape)``, ``n = x.shape``, and ``p = prod[i=1..d] n_i``. + + For description of parameters see `fftn`. + + See Also + -------- + fftn : for detailed information. + + Examples + -------- + >>> from scipy.fftpack import fftn, ifftn + >>> import numpy as np + >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16)) + >>> np.allclose(y, ifftn(fftn(y))) + True + + """ + shape = _good_shape(x, shape, axes) + return _pocketfft.ifftn(x, shape, axes, None, overwrite_x) + + +def fft2(x, shape=None, axes=(-2,-1), overwrite_x=False): + """ + 2-D discrete Fourier transform. + + Return the 2-D discrete Fourier transform of the 2-D argument + `x`. + + See Also + -------- + fftn : for detailed information. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fftpack import fft2, ifft2 + >>> y = np.mgrid[:5, :5][0] + >>> y + array([[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4]]) + >>> np.allclose(y, ifft2(fft2(y))) + True + """ + return fftn(x,shape,axes,overwrite_x) + + +def ifft2(x, shape=None, axes=(-2,-1), overwrite_x=False): + """ + 2-D discrete inverse Fourier transform of real or complex sequence. + + Return inverse 2-D discrete Fourier transform of + arbitrary type sequence x. + + See `ifft` for more information. + + See Also + -------- + fft2, ifft + + Examples + -------- + >>> import numpy as np + >>> from scipy.fftpack import fft2, ifft2 + >>> y = np.mgrid[:5, :5][0] + >>> y + array([[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4]]) + >>> np.allclose(y, fft2(ifft2(y))) + True + + """ + return ifftn(x,shape,axes,overwrite_x) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/_helper.py b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..ee0dd7b0f8d6dce5fe717fe585f5af82a7d0c651 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/_helper.py @@ -0,0 +1,115 @@ +import operator + +import numpy as np +from numpy.fft import fftshift, ifftshift, fftfreq + +import scipy.fft._pocketfft.helper as _helper + +__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len'] + + +def rfftfreq(n, d=1.0): + """DFT sample frequencies (for usage with rfft, irfft). + + The returned float array contains the frequency bins in + cycles/unit (with zero at the start) given a window length `n` and a + sample spacing `d`:: + + f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2]/(d*n) if n is even + f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2,n/2]/(d*n) if n is odd + + Parameters + ---------- + n : int + Window length. 
+ d : scalar, optional + Sample spacing. Default is 1. + + Returns + ------- + out : ndarray + The array of length `n`, containing the sample frequencies. + + Examples + -------- + >>> import numpy as np + >>> from scipy import fftpack + >>> sig = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> sig_fft = fftpack.rfft(sig) + >>> n = sig_fft.size + >>> timestep = 0.1 + >>> freq = fftpack.rfftfreq(n, d=timestep) + >>> freq + array([ 0. , 1.25, 1.25, 2.5 , 2.5 , 3.75, 3.75, 5. ]) + + """ + n = operator.index(n) + if n < 0: + raise ValueError("n = %s is not valid. " + "n must be a nonnegative integer." % n) + + return (np.arange(1, n + 1, dtype=int) // 2) / float(n * d) + + +def next_fast_len(target): + """ + Find the next fast size of input data to `fft`, for zero-padding, etc. + + SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this + returns the next composite of the prime factors 2, 3, and 5 which is + greater than or equal to `target`. (These are also known as 5-smooth + numbers, regular numbers, or Hamming numbers.) + + Parameters + ---------- + target : int + Length to start searching from. Must be a positive integer. + + Returns + ------- + out : int + The first 5-smooth number greater than or equal to `target`. + + Notes + ----- + .. versionadded:: 0.18.0 + + Examples + -------- + On a particular machine, an FFT of prime length takes 133 ms: + + >>> from scipy import fftpack + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> min_len = 10007 # prime length is worst case for speed + >>> a = rng.standard_normal(min_len) + >>> b = fftpack.fft(a) + + Zero-padding to the next 5-smooth length reduces computation time to + 211 us, a speedup of 630 times: + + >>> fftpack.next_fast_len(min_len) + 10125 + >>> b = fftpack.fft(a, 10125) + + Rounding up to the next power of 2 is not optimal, taking 367 us to + compute, 1.7 times as long as the 5-smooth size: + + >>> b = fftpack.fft(a, 16384) + + """ + # Real transforms use regular sizes so this is backwards compatible + return _helper.good_size(target, True) + + +def _good_shape(x, shape, axes): + """Ensure that shape argument is valid for scipy.fftpack + + scipy.fftpack does not support len(shape) < x.ndim when axes is not given. + """ + if shape is not None and axes is None: + shape = _helper._iterable_of_int(shape, 'shape') + if len(shape) != np.ndim(x): + raise ValueError("when given, axes and shape arguments" + " have to be of the same length") + return shape diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/_realtransforms.py b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/_realtransforms.py new file mode 100644 index 0000000000000000000000000000000000000000..f56f68fce4ea447b6b946b14d7610dc4ef07c47c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/_realtransforms.py @@ -0,0 +1,598 @@ +""" +Real spectrum transforms (DCT, DST, MDCT) +""" + +__all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn'] + +from scipy.fft import _pocketfft +from ._helper import _good_shape + +_inverse_typemap = {1: 1, 2: 3, 3: 2, 4: 4} + + +def dctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False): + """ + Return multidimensional Discrete Cosine Transform along the specified axes. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DCT (see Notes). Default type is 2. + shape : int or array_like of ints or None, optional + The shape of the result. 
If both `shape` and `axes` (see below) are + None, `shape` is ``x.shape``; if `shape` is None but `axes` is + not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``. + If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros. + If ``shape[i] < x.shape[i]``, the ith dimension is truncated to + length ``shape[i]``. + If any element of `shape` is -1, the size of the corresponding + dimension of `x` is used. + axes : int or array_like of ints or None, optional + Axes along which the DCT is computed. + The default is over all axes. + norm : {None, 'ortho'}, optional + Normalization mode (see Notes). Default is None. + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + idctn : Inverse multidimensional DCT + + Notes + ----- + For full details of the DCT types and normalization modes, as well as + references, see `dct`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fftpack import dctn, idctn + >>> rng = np.random.default_rng() + >>> y = rng.standard_normal((16, 16)) + >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho')) + True + + """ + shape = _good_shape(x, shape, axes) + return _pocketfft.dctn(x, type, shape, axes, norm, overwrite_x) + + +def idctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False): + """ + Return multidimensional Discrete Cosine Transform along the specified axes. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DCT (see Notes). Default type is 2. + shape : int or array_like of ints or None, optional + The shape of the result. If both `shape` and `axes` (see below) are + None, `shape` is ``x.shape``; if `shape` is None but `axes` is + not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``. + If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros. + If ``shape[i] < x.shape[i]``, the ith dimension is truncated to + length ``shape[i]``. + If any element of `shape` is -1, the size of the corresponding + dimension of `x` is used. + axes : int or array_like of ints or None, optional + Axes along which the IDCT is computed. + The default is over all axes. + norm : {None, 'ortho'}, optional + Normalization mode (see Notes). Default is None. + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + dctn : multidimensional DCT + + Notes + ----- + For full details of the IDCT types and normalization modes, as well as + references, see `idct`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fftpack import dctn, idctn + >>> rng = np.random.default_rng() + >>> y = rng.standard_normal((16, 16)) + >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho')) + True + + """ + type = _inverse_typemap[type] + shape = _good_shape(x, shape, axes) + return _pocketfft.dctn(x, type, shape, axes, norm, overwrite_x) + + +def dstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False): + """ + Return multidimensional Discrete Sine Transform along the specified axes. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DST (see Notes). Default type is 2. + shape : int or array_like of ints or None, optional + The shape of the result. 
If both `shape` and `axes` (see below) are + None, `shape` is ``x.shape``; if `shape` is None but `axes` is + not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``. + If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros. + If ``shape[i] < x.shape[i]``, the ith dimension is truncated to + length ``shape[i]``. + If any element of `shape` is -1, the size of the corresponding + dimension of `x` is used. + axes : int or array_like of ints or None, optional + Axes along which the DCT is computed. + The default is over all axes. + norm : {None, 'ortho'}, optional + Normalization mode (see Notes). Default is None. + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + idstn : Inverse multidimensional DST + + Notes + ----- + For full details of the DST types and normalization modes, as well as + references, see `dst`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fftpack import dstn, idstn + >>> rng = np.random.default_rng() + >>> y = rng.standard_normal((16, 16)) + >>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho')) + True + + """ + shape = _good_shape(x, shape, axes) + return _pocketfft.dstn(x, type, shape, axes, norm, overwrite_x) + + +def idstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False): + """ + Return multidimensional Discrete Sine Transform along the specified axes. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DST (see Notes). Default type is 2. + shape : int or array_like of ints or None, optional + The shape of the result. If both `shape` and `axes` (see below) are + None, `shape` is ``x.shape``; if `shape` is None but `axes` is + not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``. + If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros. + If ``shape[i] < x.shape[i]``, the ith dimension is truncated to + length ``shape[i]``. + If any element of `shape` is -1, the size of the corresponding + dimension of `x` is used. + axes : int or array_like of ints or None, optional + Axes along which the IDST is computed. + The default is over all axes. + norm : {None, 'ortho'}, optional + Normalization mode (see Notes). Default is None. + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + dstn : multidimensional DST + + Notes + ----- + For full details of the IDST types and normalization modes, as well as + references, see `idst`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fftpack import dstn, idstn + >>> rng = np.random.default_rng() + >>> y = rng.standard_normal((16, 16)) + >>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho')) + True + + """ + type = _inverse_typemap[type] + shape = _good_shape(x, shape, axes) + return _pocketfft.dstn(x, type, shape, axes, norm, overwrite_x) + + +def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False): + r""" + Return the Discrete Cosine Transform of arbitrary type sequence x. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DCT (see Notes). Default type is 2. + n : int, optional + Length of the transform. If ``n < x.shape[axis]``, `x` is + truncated. If ``n > x.shape[axis]``, `x` is zero-padded. 
The + default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the dct is computed; the default is over the + last axis (i.e., ``axis=-1``). + norm : {None, 'ortho'}, optional + Normalization mode (see Notes). Default is None. + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + idct : Inverse DCT + + Notes + ----- + For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal to + MATLAB ``dct(x)``. + + There are, theoretically, 8 types of the DCT, only the first 4 types are + implemented in scipy. 'The' DCT generally refers to DCT type 2, and 'the' + Inverse DCT generally refers to DCT type 3. + + **Type I** + + There are several definitions of the DCT-I; we use the following + (for ``norm=None``) + + .. math:: + + y_k = x_0 + (-1)^k x_{N-1} + 2 \sum_{n=1}^{N-2} x_n \cos\left( + \frac{\pi k n}{N-1} \right) + + If ``norm='ortho'``, ``x[0]`` and ``x[N-1]`` are multiplied by a scaling + factor of :math:`\sqrt{2}`, and ``y[k]`` is multiplied by a scaling factor + ``f`` + + .. math:: + + f = \begin{cases} + \frac{1}{2}\sqrt{\frac{1}{N-1}} & \text{if }k=0\text{ or }N-1, \\ + \frac{1}{2}\sqrt{\frac{2}{N-1}} & \text{otherwise} \end{cases} + + .. versionadded:: 1.2.0 + Orthonormalization in DCT-I. + + .. note:: + The DCT-I is only supported for input size > 1. + + **Type II** + + There are several definitions of the DCT-II; we use the following + (for ``norm=None``) + + .. math:: + + y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi k(2n+1)}{2N} \right) + + If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor ``f`` + + .. math:: + f = \begin{cases} + \sqrt{\frac{1}{4N}} & \text{if }k=0, \\ + \sqrt{\frac{1}{2N}} & \text{otherwise} \end{cases} + + which makes the corresponding matrix of coefficients orthonormal + (``O @ O.T = np.eye(N)``). + + **Type III** + + There are several definitions, we use the following (for ``norm=None``) + + .. math:: + + y_k = x_0 + 2 \sum_{n=1}^{N-1} x_n \cos\left(\frac{\pi(2k+1)n}{2N}\right) + + or, for ``norm='ortho'`` + + .. math:: + + y_k = \frac{x_0}{\sqrt{N}} + \sqrt{\frac{2}{N}} \sum_{n=1}^{N-1} x_n + \cos\left(\frac{\pi(2k+1)n}{2N}\right) + + The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II, up + to a factor `2N`. The orthonormalized DCT-III is exactly the inverse of + the orthonormalized DCT-II. + + **Type IV** + + There are several definitions of the DCT-IV; we use the following + (for ``norm=None``) + + .. math:: + + y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi(2k+1)(2n+1)}{4N} \right) + + If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor ``f`` + + .. math:: + + f = \frac{1}{\sqrt{2N}} + + .. versionadded:: 1.2.0 + Support for DCT-IV. + + References + ---------- + .. [1] 'A Fast Cosine Transform in One and Two Dimensions', by J. + Makhoul, `IEEE Transactions on acoustics, speech and signal + processing` vol. 28(1), pp. 27-34, + :doi:`10.1109/TASSP.1980.1163351` (1980). + .. [2] Wikipedia, "Discrete cosine transform", + https://en.wikipedia.org/wiki/Discrete_cosine_transform + + Examples + -------- + The Type 1 DCT is equivalent to the FFT (though faster) for real, + even-symmetrical inputs. The output is also real and even-symmetrical. 
+ Half of the FFT input is used to generate half of the FFT output: + + >>> from scipy.fftpack import fft, dct + >>> import numpy as np + >>> fft(np.array([4., 3., 5., 10., 5., 3.])).real + array([ 30., -8., 6., -2., 6., -8.]) + >>> dct(np.array([4., 3., 5., 10.]), 1) + array([ 30., -8., 6., -2.]) + + """ + return _pocketfft.dct(x, type, n, axis, norm, overwrite_x) + + +def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False): + """ + Return the Inverse Discrete Cosine Transform of an arbitrary type sequence. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DCT (see Notes). Default type is 2. + n : int, optional + Length of the transform. If ``n < x.shape[axis]``, `x` is + truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The + default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the idct is computed; the default is over the + last axis (i.e., ``axis=-1``). + norm : {None, 'ortho'}, optional + Normalization mode (see Notes). Default is None. + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + idct : ndarray of real + The transformed input array. + + See Also + -------- + dct : Forward DCT + + Notes + ----- + For a single dimension array `x`, ``idct(x, norm='ortho')`` is equal to + MATLAB ``idct(x)``. + + 'The' IDCT is the IDCT of type 2, which is the same as DCT of type 3. + + IDCT of type 1 is the DCT of type 1, IDCT of type 2 is the DCT of type + 3, and IDCT of type 3 is the DCT of type 2. IDCT of type 4 is the DCT + of type 4. For the definition of these types, see `dct`. + + Examples + -------- + The Type 1 DCT is equivalent to the DFT for real, even-symmetrical + inputs. The output is also real and even-symmetrical. Half of the IFFT + input is used to generate half of the IFFT output: + + >>> from scipy.fftpack import ifft, idct + >>> import numpy as np + >>> ifft(np.array([ 30., -8., 6., -2., 6., -8.])).real + array([ 4., 3., 5., 10., 5., 3.]) + >>> idct(np.array([ 30., -8., 6., -2.]), 1) / 6 + array([ 4., 3., 5., 10.]) + + """ + type = _inverse_typemap[type] + return _pocketfft.dct(x, type, n, axis, norm, overwrite_x) + + +def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False): + r""" + Return the Discrete Sine Transform of arbitrary type sequence x. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DST (see Notes). Default type is 2. + n : int, optional + Length of the transform. If ``n < x.shape[axis]``, `x` is + truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The + default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the dst is computed; the default is over the + last axis (i.e., ``axis=-1``). + norm : {None, 'ortho'}, optional + Normalization mode (see Notes). Default is None. + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + dst : ndarray of reals + The transformed input array. + + See Also + -------- + idst : Inverse DST + + Notes + ----- + For a single dimension array ``x``. + + There are, theoretically, 8 types of the DST for different combinations of + even/odd boundary conditions and boundary off sets [1]_, only the first + 4 types are implemented in scipy. + + **Type I** + + There are several definitions of the DST-I; we use the following + for ``norm=None``. 
DST-I assumes the input is odd around `n=-1` and `n=N`. + + .. math:: + + y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(n+1)}{N+1}\right) + + Note that the DST-I is only supported for input size > 1. + The (unnormalized) DST-I is its own inverse, up to a factor `2(N+1)`. + The orthonormalized DST-I is exactly its own inverse. + + **Type II** + + There are several definitions of the DST-II; we use the following for + ``norm=None``. DST-II assumes the input is odd around `n=-1/2` and + `n=N-1/2`; the output is odd around :math:`k=-1` and even around `k=N-1` + + .. math:: + + y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(2n+1)}{2N}\right) + + if ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor ``f`` + + .. math:: + + f = \begin{cases} + \sqrt{\frac{1}{4N}} & \text{if }k = 0, \\ + \sqrt{\frac{1}{2N}} & \text{otherwise} \end{cases} + + **Type III** + + There are several definitions of the DST-III, we use the following (for + ``norm=None``). DST-III assumes the input is odd around `n=-1` and even + around `n=N-1` + + .. math:: + + y_k = (-1)^k x_{N-1} + 2 \sum_{n=0}^{N-2} x_n \sin\left( + \frac{\pi(2k+1)(n+1)}{2N}\right) + + The (unnormalized) DST-III is the inverse of the (unnormalized) DST-II, up + to a factor `2N`. The orthonormalized DST-III is exactly the inverse of the + orthonormalized DST-II. + + .. versionadded:: 0.11.0 + + **Type IV** + + There are several definitions of the DST-IV, we use the following (for + ``norm=None``). DST-IV assumes the input is odd around `n=-0.5` and even + around `n=N-0.5` + + .. math:: + + y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(2k+1)(2n+1)}{4N}\right) + + The (unnormalized) DST-IV is its own inverse, up to a factor `2N`. The + orthonormalized DST-IV is exactly its own inverse. + + .. versionadded:: 1.2.0 + Support for DST-IV. + + References + ---------- + .. [1] Wikipedia, "Discrete sine transform", + https://en.wikipedia.org/wiki/Discrete_sine_transform + + """ + return _pocketfft.dst(x, type, n, axis, norm, overwrite_x) + + +def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False): + """ + Return the Inverse Discrete Sine Transform of an arbitrary type sequence. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DST (see Notes). Default type is 2. + n : int, optional + Length of the transform. If ``n < x.shape[axis]``, `x` is + truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The + default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the idst is computed; the default is over the + last axis (i.e., ``axis=-1``). + norm : {None, 'ortho'}, optional + Normalization mode (see Notes). Default is None. + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + idst : ndarray of real + The transformed input array. + + See Also + -------- + dst : Forward DST + + Notes + ----- + 'The' IDST is the IDST of type 2, which is the same as DST of type 3. + + IDST of type 1 is the DST of type 1, IDST of type 2 is the DST of type + 3, and IDST of type 3 is the DST of type 2. For the definition of these + types, see `dst`. + + .. 
versionadded:: 0.11.0 + + """ + type = _inverse_typemap[type] + return _pocketfft.dst(x, type, n, axis, norm, overwrite_x) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a52eeb49d01317365bd80950faefd7117d9e1fec Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/helper.py b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/helper.py new file mode 100644 index 0000000000000000000000000000000000000000..fcc7000c215f8a7605a2a59b5767b27b2fcd969d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/helper.py @@ -0,0 +1,19 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.fftpack` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="fftpack", module="helper", + private_modules=["_helper"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/pseudo_diffs.py b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/pseudo_diffs.py new file mode 100644 index 0000000000000000000000000000000000000000..07a245cdc41edfc4904b8d6e5d5cd4b76a4f3328 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/pseudo_diffs.py @@ -0,0 +1,22 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.fftpack` namespace for importing the functions +# included below. 
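# A minimal illustration of what this shim does (a sketch, not part of the
# public API): attribute access is forwarded to the private module with a
# DeprecationWarning, via the module-level ``__getattr__`` defined below.
#
#   import warnings
#   from scipy.fftpack import pseudo_diffs
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       func = pseudo_diffs.diff  # same object as scipy.fftpack.diff
#   assert any(issubclass(w.category, DeprecationWarning) for w in caught)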
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'diff', + 'tilbert', 'itilbert', 'hilbert', 'ihilbert', + 'cs_diff', 'cc_diff', 'sc_diff', 'ss_diff', + 'shift', 'iscomplexobj', 'convolve' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="fftpack", module="pseudo_diffs", + private_modules=["_pseudo_diffs"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67fa3fe35e6809bda1fdf9639714cf72ad30ac70 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44f160c5a5312c4aa645d7948d02bd9ae7eab2df Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_helper.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d5e1483621b3d800e1570c881710b9140490892 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_helper.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f694ce150bb14a6cacaf02023e4ae96efb494e11 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ecbe253c317451ae6037e60b4e71d5b2b4f4374 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2911a4193d386beb23f9d316841e4c77217d969b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_basic.py b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..a7c4b1de867fb4eadc72e17e2e70a02c9ba190a5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_basic.py @@ -0,0 +1,873 @@ +# Created by Pearu Peterson, September 2002 + +from numpy.testing import (assert_, assert_equal, assert_array_almost_equal, + assert_array_almost_equal_nulp, assert_array_less) +import pytest +from pytest import raises as assert_raises +from scipy.fftpack import ifft, fft, fftn, ifftn, rfft, irfft, fft2 + +from numpy import (arange, array, asarray, zeros, dot, exp, pi, + swapaxes, double, cdouble) +import numpy as np +import numpy.fft +from numpy.random import rand + +# "large" composite numbers supported by FFTPACK +LARGE_COMPOSITE_SIZES = [ + 2**13, + 2**5 * 3**5, + 2**3 * 3**3 * 5**2, +] +SMALL_COMPOSITE_SIZES = [ + 2, + 2*3*5, + 2*2*3*3, +] +# prime +LARGE_PRIME_SIZES = [ + 2011 +] +SMALL_PRIME_SIZES = [ + 29 +] + + +def _assert_close_in_norm(x, y, rtol, size, rdt): + # helper function for testing + err_msg = f"size: {size} rdt: {rdt}" + assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg) + + +def random(size): + return rand(*size) + + +def direct_dft(x): + x = asarray(x) + n = len(x) + y = zeros(n, dtype=cdouble) + w = -arange(n)*(2j*pi/n) + for i in range(n): + y[i] = dot(exp(i*w), x) + return y + + +def direct_idft(x): + x = asarray(x) + n = len(x) + y = zeros(n, dtype=cdouble) + w = arange(n)*(2j*pi/n) + for i in range(n): + y[i] = dot(exp(i*w), x)/n + return y + + +def direct_dftn(x): + x = asarray(x) + for axis in range(len(x.shape)): + x = fft(x, axis=axis) + return x + + +def direct_idftn(x): + x = asarray(x) + for axis in range(len(x.shape)): + x = ifft(x, axis=axis) + return x + + +def direct_rdft(x): + x = asarray(x) + n = len(x) + w = -arange(n)*(2j*pi/n) + r = zeros(n, dtype=double) + for i in range(n//2+1): + y = dot(exp(i*w), x) + if i: + r[2*i-1] = y.real + if 2*i < n: + r[2*i] = y.imag + else: + r[0] = y.real + return r + + +def direct_irdft(x): + x = asarray(x) + n = len(x) + x1 = zeros(n, dtype=cdouble) + for i in range(n//2+1): + if i: + if 2*i < n: + x1[i] = x[2*i-1] + 1j*x[2*i] + x1[n-i] = x[2*i-1] - 1j*x[2*i] + else: + x1[i] = x[2*i-1] + else: + x1[0] = x[0] + return direct_idft(x1).real + + +class _TestFFTBase: + def setup_method(self): + self.cdt = None + self.rdt = None + np.random.seed(1234) + + def test_definition(self): + x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt) + y = fft(x) + assert_equal(y.dtype, self.cdt) + y1 = direct_dft(x) + assert_array_almost_equal(y,y1) + x = np.array([1,2,3,4+0j,5], dtype=self.cdt) + assert_array_almost_equal(fft(x),direct_dft(x)) + + def test_n_argument_real(self): + x1 = np.array([1,2,3,4], dtype=self.rdt) + x2 = np.array([1,2,3,4], dtype=self.rdt) + y = fft([x1,x2],n=4) + assert_equal(y.dtype, self.cdt) + assert_equal(y.shape,(2,4)) + assert_array_almost_equal(y[0],direct_dft(x1)) + assert_array_almost_equal(y[1],direct_dft(x2)) + + def _test_n_argument_complex(self): + x1 = np.array([1,2,3,4+1j], dtype=self.cdt) + x2 = np.array([1,2,3,4+1j], dtype=self.cdt) + y = fft([x1,x2],n=4) + assert_equal(y.dtype, self.cdt) + assert_equal(y.shape,(2,4)) + assert_array_almost_equal(y[0],direct_dft(x1)) + assert_array_almost_equal(y[1],direct_dft(x2)) + + def test_invalid_sizes(self): + assert_raises(ValueError, fft, []) + 
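        # A negative transform length must be rejected too; the positional
        # -5 below is the ``n`` argument, i.e. this is fft(x, n=-5).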
assert_raises(ValueError, fft, [[1,1],[2,2]], -5) + + +class TestDoubleFFT(_TestFFTBase): + def setup_method(self): + self.cdt = np.complex128 + self.rdt = np.float64 + + +class TestSingleFFT(_TestFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + + reason = ("single-precision FFT implementation is partially disabled, " + "until accuracy issues with large prime powers are resolved") + + @pytest.mark.xfail(run=False, reason=reason) + def test_notice(self): + pass + + +class TestFloat16FFT: + + def test_1_argument_real(self): + x1 = np.array([1, 2, 3, 4], dtype=np.float16) + y = fft(x1, n=4) + assert_equal(y.dtype, np.complex64) + assert_equal(y.shape, (4, )) + assert_array_almost_equal(y, direct_dft(x1.astype(np.float32))) + + def test_n_argument_real(self): + x1 = np.array([1, 2, 3, 4], dtype=np.float16) + x2 = np.array([1, 2, 3, 4], dtype=np.float16) + y = fft([x1, x2], n=4) + assert_equal(y.dtype, np.complex64) + assert_equal(y.shape, (2, 4)) + assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32))) + assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32))) + + +class _TestIFFTBase: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt) + y = ifft(x) + y1 = direct_idft(x) + assert_equal(y.dtype, self.cdt) + assert_array_almost_equal(y,y1) + + x = np.array([1,2,3,4+0j,5], self.cdt) + assert_array_almost_equal(ifft(x),direct_idft(x)) + + def test_definition_real(self): + x = np.array([1,2,3,4,1,2,3,4], self.rdt) + y = ifft(x) + assert_equal(y.dtype, self.cdt) + y1 = direct_idft(x) + assert_array_almost_equal(y,y1) + + x = np.array([1,2,3,4,5], dtype=self.rdt) + assert_equal(y.dtype, self.cdt) + assert_array_almost_equal(ifft(x),direct_idft(x)) + + def test_random_complex(self): + for size in [1,51,111,100,200,64,128,256,1024]: + x = random([size]).astype(self.cdt) + x = random([size]).astype(self.cdt) + 1j*x + y1 = ifft(fft(x)) + y2 = fft(ifft(x)) + assert_equal(y1.dtype, self.cdt) + assert_equal(y2.dtype, self.cdt) + assert_array_almost_equal(y1, x) + assert_array_almost_equal(y2, x) + + def test_random_real(self): + for size in [1,51,111,100,200,64,128,256,1024]: + x = random([size]).astype(self.rdt) + y1 = ifft(fft(x)) + y2 = fft(ifft(x)) + assert_equal(y1.dtype, self.cdt) + assert_equal(y2.dtype, self.cdt) + assert_array_almost_equal(y1, x) + assert_array_almost_equal(y2, x) + + def test_size_accuracy(self): + # Sanity check for the accuracy for prime and non-prime sized inputs + if self.rdt == np.float32: + rtol = 1e-5 + elif self.rdt == np.float64: + rtol = 1e-10 + + for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES: + np.random.seed(1234) + x = np.random.rand(size).astype(self.rdt) + y = ifft(fft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + y = fft(ifft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + + x = (x + 1j*np.random.rand(size)).astype(self.cdt) + y = ifft(fft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + y = fft(ifft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + + def test_invalid_sizes(self): + assert_raises(ValueError, ifft, []) + assert_raises(ValueError, ifft, [[1,1],[2,2]], -5) + + +class TestDoubleIFFT(_TestIFFTBase): + def setup_method(self): + self.cdt = np.complex128 + self.rdt = np.float64 + + +class TestSingleIFFT(_TestIFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + + +class _TestRFFTBase: + def setup_method(self): + np.random.seed(1234) + + 
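    # The packed layout exercised below, in sketch form: for real input,
    # rfft interleaves the real and imaginary parts of the positive
    # frequencies, so it must match direct_rdft above element-by-element:
    #   rfft([1., 2., 3., 4.]) -> [10., -2., 2., -2.]
    #                             == [y(0), Re(y(1)), Im(y(1)), Re(y(2))]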
def test_definition(self): + for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]: + x = np.array(t, dtype=self.rdt) + y = rfft(x) + y1 = direct_rdft(x) + assert_array_almost_equal(y,y1) + assert_equal(y.dtype, self.rdt) + + def test_invalid_sizes(self): + assert_raises(ValueError, rfft, []) + assert_raises(ValueError, rfft, [[1,1],[2,2]], -5) + + # See gh-5790 + class MockSeries: + def __init__(self, data): + self.data = np.asarray(data) + + def __getattr__(self, item): + try: + return getattr(self.data, item) + except AttributeError as e: + raise AttributeError("'MockSeries' object " + f"has no attribute '{item}'") from e + + def test_non_ndarray_with_dtype(self): + x = np.array([1., 2., 3., 4., 5.]) + xs = _TestRFFTBase.MockSeries(x) + + expected = [1, 2, 3, 4, 5] + rfft(xs) + + # Data should not have been overwritten + assert_equal(x, expected) + assert_equal(xs.data, expected) + + def test_complex_input(self): + assert_raises(TypeError, rfft, np.arange(4, dtype=np.complex64)) + + +class TestRFFTDouble(_TestRFFTBase): + def setup_method(self): + self.cdt = np.complex128 + self.rdt = np.float64 + + +class TestRFFTSingle(_TestRFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + + +class _TestIRFFTBase: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x1 = [1,2,3,4,1,2,3,4] + x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j] + x2 = [1,2,3,4,1,2,3,4,5] + x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j] + + def _test(x, xr): + y = irfft(np.array(x, dtype=self.rdt)) + y1 = direct_irdft(x) + assert_equal(y.dtype, self.rdt) + assert_array_almost_equal(y,y1, decimal=self.ndec) + assert_array_almost_equal(y,ifft(xr), decimal=self.ndec) + + _test(x1, x1_1) + _test(x2, x2_1) + + def test_random_real(self): + for size in [1,51,111,100,200,64,128,256,1024]: + x = random([size]).astype(self.rdt) + y1 = irfft(rfft(x)) + y2 = rfft(irfft(x)) + assert_equal(y1.dtype, self.rdt) + assert_equal(y2.dtype, self.rdt) + assert_array_almost_equal(y1, x, decimal=self.ndec, + err_msg="size=%d" % size) + assert_array_almost_equal(y2, x, decimal=self.ndec, + err_msg="size=%d" % size) + + def test_size_accuracy(self): + # Sanity check for the accuracy for prime and non-prime sized inputs + if self.rdt == np.float32: + rtol = 1e-5 + elif self.rdt == np.float64: + rtol = 1e-10 + + for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES: + np.random.seed(1234) + x = np.random.rand(size).astype(self.rdt) + y = irfft(rfft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + y = rfft(irfft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + + def test_invalid_sizes(self): + assert_raises(ValueError, irfft, []) + assert_raises(ValueError, irfft, [[1,1],[2,2]], -5) + + def test_complex_input(self): + assert_raises(TypeError, irfft, np.arange(4, dtype=np.complex64)) + + +# self.ndec is bogus; we should have a assert_array_approx_equal for number of +# significant digits + +class TestIRFFTDouble(_TestIRFFTBase): + def setup_method(self): + self.cdt = np.complex128 + self.rdt = np.float64 + self.ndec = 14 + + +class TestIRFFTSingle(_TestIRFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + self.ndec = 5 + + +class Testfft2: + def setup_method(self): + np.random.seed(1234) + + def test_regression_244(self): + """FFT returns wrong result with axes parameter.""" + # fftn (and hence fft2) used to break when both axes and shape were + # used + x = numpy.ones((4, 4, 2)) + y = fft2(x, shape=(8, 8), axes=(-3, -2)) + 
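        # ``y_r`` is the reference result from numpy.fft with the same
        # shape/axes; the regression was fftn mixing the two arguments up,
        # so the two results below must agree.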
y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2)) + assert_array_almost_equal(y, y_r) + + def test_invalid_sizes(self): + assert_raises(ValueError, fft2, [[]]) + assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3)) + + +class TestFftnSingle: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + y = fftn(np.array(x, np.float32)) + assert_(y.dtype == np.complex64, + msg="double precision output with single precision") + + y_r = np.array(fftn(x), np.complex64) + assert_array_almost_equal_nulp(y, y_r) + + @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES) + def test_size_accuracy_small(self, size): + x = np.random.rand(size, size) + 1j*np.random.rand(size, size) + y1 = fftn(x.real.astype(np.float32)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 2000) + + @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES) + def test_size_accuracy_large(self, size): + x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3) + y1 = fftn(x.real.astype(np.float32)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 2000) + + def test_definition_float16(self): + x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + y = fftn(np.array(x, np.float16)) + assert_equal(y.dtype, np.complex64) + y_r = np.array(fftn(x), np.complex64) + assert_array_almost_equal_nulp(y, y_r) + + @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES) + def test_float16_input_small(self, size): + x = np.random.rand(size, size) + 1j*np.random.rand(size, size) + y1 = fftn(x.real.astype(np.float16)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 5e5) + + @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES) + def test_float16_input_large(self, size): + x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3) + y1 = fftn(x.real.astype(np.float16)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 2e6) + + +class TestFftn: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + y = fftn(x) + assert_array_almost_equal(y, direct_dftn(x)) + + x = random((20, 26)) + assert_array_almost_equal(fftn(x), direct_dftn(x)) + + x = random((5, 4, 3, 20)) + assert_array_almost_equal(fftn(x), direct_dftn(x)) + + def test_axes_argument(self): + # plane == ji_plane, x== kji_space + plane1 = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + plane2 = [[10, 11, 12], + [13, 14, 15], + [16, 17, 18]] + plane3 = [[19, 20, 21], + [22, 23, 24], + [25, 26, 27]] + ki_plane1 = [[1, 2, 3], + [10, 11, 12], + [19, 20, 21]] + ki_plane2 = [[4, 5, 6], + [13, 14, 15], + [22, 23, 24]] + ki_plane3 = [[7, 8, 9], + [16, 17, 18], + [25, 26, 27]] + jk_plane1 = [[1, 10, 19], + [4, 13, 22], + [7, 16, 25]] + jk_plane2 = [[2, 11, 20], + [5, 14, 23], + [8, 17, 26]] + jk_plane3 = [[3, 12, 21], + [6, 15, 24], + [9, 18, 27]] + kj_plane1 = [[1, 4, 7], + [10, 13, 16], [19, 22, 25]] + kj_plane2 = [[2, 5, 8], + [11, 14, 17], [20, 23, 26]] + kj_plane3 = [[3, 6, 9], + [12, 15, 18], [21, 24, 27]] + ij_plane1 = [[1, 4, 7], + [2, 5, 8], + [3, 6, 9]] + ij_plane2 = [[10, 13, 16], + [11, 14, 17], + [12, 15, 18]] + ij_plane3 = 
[[19, 22, 25], + [20, 23, 26], + [21, 24, 27]] + ik_plane1 = [[1, 10, 19], + [2, 11, 20], + [3, 12, 21]] + ik_plane2 = [[4, 13, 22], + [5, 14, 23], + [6, 15, 24]] + ik_plane3 = [[7, 16, 25], + [8, 17, 26], + [9, 18, 27]] + ijk_space = [jk_plane1, jk_plane2, jk_plane3] + ikj_space = [kj_plane1, kj_plane2, kj_plane3] + jik_space = [ik_plane1, ik_plane2, ik_plane3] + jki_space = [ki_plane1, ki_plane2, ki_plane3] + kij_space = [ij_plane1, ij_plane2, ij_plane3] + x = array([plane1, plane2, plane3]) + + assert_array_almost_equal(fftn(x), + fftn(x, axes=(-3, -2, -1))) # kji_space + assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2))) + assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1))) + y = fftn(x, axes=(2, 1, 0)) # ijk_space + assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space)) + y = fftn(x, axes=(2, 0, 1)) # ikj_space + assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2), + fftn(ikj_space)) + y = fftn(x, axes=(1, 2, 0)) # jik_space + assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2), + fftn(jik_space)) + y = fftn(x, axes=(1, 0, 2)) # jki_space + assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space)) + y = fftn(x, axes=(0, 2, 1)) # kij_space + assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space)) + + y = fftn(x, axes=(-2, -1)) # ji_plane + assert_array_almost_equal(fftn(plane1), y[0]) + assert_array_almost_equal(fftn(plane2), y[1]) + assert_array_almost_equal(fftn(plane3), y[2]) + + y = fftn(x, axes=(1, 2)) # ji_plane + assert_array_almost_equal(fftn(plane1), y[0]) + assert_array_almost_equal(fftn(plane2), y[1]) + assert_array_almost_equal(fftn(plane3), y[2]) + + y = fftn(x, axes=(-3, -2)) # kj_plane + assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0]) + assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1]) + assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2]) + + y = fftn(x, axes=(-3, -1)) # ki_plane + assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :]) + assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :]) + assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :]) + + y = fftn(x, axes=(-1, -2)) # ij_plane + assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1)) + assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1)) + assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1)) + + y = fftn(x, axes=(-1, -3)) # ik_plane + assert_array_almost_equal(fftn(ik_plane1), + swapaxes(y[:, 0, :], -1, -2)) + assert_array_almost_equal(fftn(ik_plane2), + swapaxes(y[:, 1, :], -1, -2)) + assert_array_almost_equal(fftn(ik_plane3), + swapaxes(y[:, 2, :], -1, -2)) + + y = fftn(x, axes=(-2, -3)) # jk_plane + assert_array_almost_equal(fftn(jk_plane1), + swapaxes(y[:, :, 0], -1, -2)) + assert_array_almost_equal(fftn(jk_plane2), + swapaxes(y[:, :, 1], -1, -2)) + assert_array_almost_equal(fftn(jk_plane3), + swapaxes(y[:, :, 2], -1, -2)) + + y = fftn(x, axes=(-1,)) # i_line + for i in range(3): + for j in range(3): + assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :]) + y = fftn(x, axes=(-2,)) # j_line + for i in range(3): + for j in range(3): + assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j]) + y = fftn(x, axes=(0,)) # k_line + for i in range(3): + for j in range(3): + assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j]) + + y = fftn(x, axes=()) # point + assert_array_almost_equal(y, x) + + def test_shape_argument(self): + small_x = [[1, 2, 3], + [4, 5, 6]] + large_x1 = [[1, 2, 3, 0], + [4, 5, 6, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]] + + y = fftn(small_x, shape=(4, 4)) + 
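# shape=(4, 4) zero-pads the 2x3 input, so the result must match + # fftn of large_x1, which is just small_x embedded in a 4x4 array + # of zeros. +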
assert_array_almost_equal(y, fftn(large_x1)) + + y = fftn(small_x, shape=(3, 4)) + assert_array_almost_equal(y, fftn(large_x1[:-1])) + + def test_shape_axes_argument(self): + small_x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + large_x1 = array([[1, 2, 3, 0], + [4, 5, 6, 0], + [7, 8, 9, 0], + [0, 0, 0, 0]]) + y = fftn(small_x, shape=(4, 4), axes=(-2, -1)) + assert_array_almost_equal(y, fftn(large_x1)) + y = fftn(small_x, shape=(4, 4), axes=(-1, -2)) + + assert_array_almost_equal(y, swapaxes( + fftn(swapaxes(large_x1, -1, -2)), -1, -2)) + + def test_shape_axes_argument2(self): + # Change shape of the last axis + x = numpy.random.random((10, 5, 3, 7)) + y = fftn(x, axes=(-1,), shape=(8,)) + assert_array_almost_equal(y, fft(x, axis=-1, n=8)) + + # Change shape of an arbitrary axis which is not the last one + x = numpy.random.random((10, 5, 3, 7)) + y = fftn(x, axes=(-2,), shape=(8,)) + assert_array_almost_equal(y, fft(x, axis=-2, n=8)) + + # Change shape of axes: cf #244, where shape and axes were mixed up + x = numpy.random.random((4, 4, 2)) + y = fftn(x, axes=(-3, -2), shape=(8, 8)) + assert_array_almost_equal(y, + numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8))) + + def test_shape_argument_more(self): + x = zeros((4, 4, 2)) + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fftn(x, shape=(8, 8, 2, 1)) + + def test_invalid_sizes(self): + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[1, 0\]\) specified"): + fftn([[]]) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[4, -3\]\) specified"): + fftn([[1, 1], [2, 2]], (4, -3)) + + +class TestIfftn: + dtype = None + cdtype = None + + def setup_method(self): + np.random.seed(1234) + + @pytest.mark.parametrize('dtype,cdtype,maxnlp', + [(np.float64, np.complex128, 2000), + (np.float32, np.complex64, 3500)]) + def test_definition(self, dtype, cdtype, maxnlp): + x = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]], dtype=dtype) + y = ifftn(x) + assert_equal(y.dtype, cdtype) + assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp) + + x = random((20, 26)) + assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp) + + x = random((5, 4, 3, 20)) + assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp) + + @pytest.mark.parametrize('maxnlp', [2000, 3500]) + @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92]) + def test_random_complex(self, maxnlp, size): + x = random([size, size]) + 1j*random([size, size]) + assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp) + assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp) + + def test_invalid_sizes(self): + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[1, 0\]\) specified"): + ifftn([[]]) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[4, -3\]\) specified"): + ifftn([[1, 1], [2, 2]], (4, -3)) + + +class FakeArray: + def __init__(self, data): + self._data = data + self.__array_interface__ = data.__array_interface__ + + +class FakeArray2: + def __init__(self, data): + self._data = data + + def __array__(self, dtype=None, copy=None): + return self._data + + +class TestOverwrite: + """Check input overwrite behavior of the FFT functions.""" + + real_dtypes = (np.float32, np.float64) + dtypes = real_dtypes + (np.complex64, np.complex128) + fftsizes = [8, 16, 32] + + def _check(self, x, routine, fftsize, axis, overwrite_x): + x2 = x.copy() + for fake in [lambda x: x, FakeArray, FakeArray2]: + 
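# Run the routine on a plain ndarray and on the two array-like + # wrappers defined above (__array_interface__ and __array__), so + # the overwrite check covers every supported input path. +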
routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x) + + sig = "{}({}{!r}, {!r}, axis={!r}, overwrite_x={!r})".format( + routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x) + if not overwrite_x: + assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) + + def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes, + fftsize, overwrite_x): + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + data = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + data = np.random.randn(*shape) + data = data.astype(dtype) + + self._check(data, routine, fftsize, axis, + overwrite_x=overwrite_x) + + @pytest.mark.parametrize('dtype', dtypes) + @pytest.mark.parametrize('fftsize', fftsizes) + @pytest.mark.parametrize('overwrite_x', [True, False]) + @pytest.mark.parametrize('shape,axes', [((16,), -1), + ((16, 2), 0), + ((2, 16), 1)]) + def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes): + overwritable = (np.complex128, np.complex64) + self._check_1d(fft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + self._check_1d(ifft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + + @pytest.mark.parametrize('dtype', real_dtypes) + @pytest.mark.parametrize('fftsize', fftsizes) + @pytest.mark.parametrize('overwrite_x', [True, False]) + @pytest.mark.parametrize('shape,axes', [((16,), -1), + ((16, 2), 0), + ((2, 16), 1)]) + def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes): + overwritable = self.real_dtypes + self._check_1d(irfft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + self._check_1d(rfft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + + def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes, + overwrite_x): + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + data = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + data = np.random.randn(*shape) + data = data.astype(dtype) + + def fftshape_iter(shp): + if len(shp) <= 0: + yield () + else: + for j in (shp[0]//2, shp[0], shp[0]*2): + for rest in fftshape_iter(shp[1:]): + yield (j,) + rest + + if axes is None: + part_shape = shape + else: + part_shape = tuple(np.take(shape, axes)) + + for fftshape in fftshape_iter(part_shape): + self._check(data, routine, fftshape, axes, + overwrite_x=overwrite_x) + if data.ndim > 1: + self._check(data.T, routine, fftshape, axes, + overwrite_x=overwrite_x) + + @pytest.mark.parametrize('dtype', dtypes) + @pytest.mark.parametrize('overwrite_x', [True, False]) + @pytest.mark.parametrize('shape,axes', [((16,), None), + ((16,), (0,)), + ((16, 2), (0,)), + ((2, 16), (1,)), + ((8, 16), None), + ((8, 16), (0, 1)), + ((8, 16, 2), (0, 1)), + ((8, 16, 2), (1, 2)), + ((8, 16, 2), (0,)), + ((8, 16, 2), (1,)), + ((8, 16, 2), (2,)), + ((8, 16, 2), None), + ((8, 16, 2), (0, 1, 2))]) + def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes): + overwritable = (np.complex128, np.complex64) + self._check_nd_one(fftn, dtype, shape, axes, overwritable, + overwrite_x) + self._check_nd_one(ifftn, dtype, shape, axes, overwritable, + overwrite_x) + + +@pytest.mark.parametrize('func', [fftn, ifftn, fft2]) +def test_shape_axes_ndarray(func): + # Test fftn and ifftn work with NumPy arrays for shape and axes arguments + # Regression test for gh-13342 + a = np.random.rand(10, 10) + + expect = func(a, shape=(5, 5)) + actual = func(a, shape=np.array([5, 5])) + assert_equal(expect, actual) + + expect = func(a, axes=(-1,)) + actual = func(a, axes=np.array([-1,])) + 
assert_equal(expect, actual) + + expect = func(a, shape=(4, 7), axes=(1, 0)) + actual = func(a, shape=np.array([4, 7]), axes=np.array([1, 0])) + assert_equal(expect, actual) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_helper.py b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..5e7be04f3c0291502b50b101db82d299aadc7772 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_helper.py @@ -0,0 +1,54 @@ +# Created by Pearu Peterson, September 2002 + +__usage__ = """ +Build fftpack: + python setup_fftpack.py build +Run tests if scipy is installed: + python -c 'import scipy;scipy.fftpack.test()' +Run tests if fftpack is not installed: + python tests/test_helper.py [] +""" + +from numpy.testing import assert_array_almost_equal +from scipy.fftpack import fftshift, ifftshift, fftfreq, rfftfreq + +from numpy import pi, random + +class TestFFTShift: + + def test_definition(self): + x = [0,1,2,3,4,-4,-3,-2,-1] + y = [-4,-3,-2,-1,0,1,2,3,4] + assert_array_almost_equal(fftshift(x),y) + assert_array_almost_equal(ifftshift(y),x) + x = [0,1,2,3,4,-5,-4,-3,-2,-1] + y = [-5,-4,-3,-2,-1,0,1,2,3,4] + assert_array_almost_equal(fftshift(x),y) + assert_array_almost_equal(ifftshift(y),x) + + def test_inverse(self): + for n in [1,4,9,100,211]: + x = random.random((n,)) + assert_array_almost_equal(ifftshift(fftshift(x)),x) + + +class TestFFTFreq: + + def test_definition(self): + x = [0,1,2,3,4,-4,-3,-2,-1] + assert_array_almost_equal(9*fftfreq(9),x) + assert_array_almost_equal(9*pi*fftfreq(9,pi),x) + x = [0,1,2,3,4,-5,-4,-3,-2,-1] + assert_array_almost_equal(10*fftfreq(10),x) + assert_array_almost_equal(10*pi*fftfreq(10,pi),x) + + +class TestRFFTFreq: + + def test_definition(self): + x = [0,1,1,2,2,3,3,4,4] + assert_array_almost_equal(9*rfftfreq(9),x) + assert_array_almost_equal(9*pi*rfftfreq(9,pi),x) + x = [0,1,1,2,2,3,3,4,4,5] + assert_array_almost_equal(10*rfftfreq(10),x) + assert_array_almost_equal(10*pi*rfftfreq(10,pi),x) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_import.py b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_import.py new file mode 100644 index 0000000000000000000000000000000000000000..8a978d9651e54c3dd2b3ae5e15ac6635f08d8cc1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_import.py @@ -0,0 +1,31 @@ +"""Test possibility of patching fftpack with pyfftw. + +No module source outside of scipy.fftpack should contain an import of +the form `from scipy.fftpack import ...`, so that a simple replacement +of scipy.fftpack by the corresponding fftw interface completely swaps +the two FFT implementations. + +Because this simply inspects source files, we only need to run the test +on one version of Python. 
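+ +For example, a module that does `from scipy.fftpack import fft` has +already bound the function object and would bypass such a patch, +whereas `import scipy.fftpack` followed by attribute access keeps +the indirection intact.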
+""" + + +from pathlib import Path +import re +import tokenize +from numpy.testing import assert_ +import scipy + +class TestFFTPackImport: + def test_fftpack_import(self): + base = Path(scipy.__file__).parent + regexp = r"\s*from.+\.fftpack import .*\n" + for path in base.rglob("*.py"): + if base / "fftpack" in path.parents: + continue + # use tokenize to auto-detect encoding on systems where no + # default encoding is defined (e.g., LANG='C') + with tokenize.open(str(path)) as file: + assert_(all(not re.fullmatch(regexp, line) + for line in file), + f"{path} contains an import from fftpack") diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_pseudo_diffs.py b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_pseudo_diffs.py new file mode 100644 index 0000000000000000000000000000000000000000..cec131caced4ccf9cf7c34255f7693769e2ebb12 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_pseudo_diffs.py @@ -0,0 +1,380 @@ +# Created by Pearu Peterson, September 2002 + +__usage__ = """ +Build fftpack: + python setup_fftpack.py build +Run tests if scipy is installed: + python -c 'import scipy;scipy.fftpack.test()' +Run tests if fftpack is not installed: + python tests/test_pseudo_diffs.py [] +""" + +from numpy.testing import (assert_equal, assert_almost_equal, + assert_array_almost_equal) +from scipy.fftpack import (diff, fft, ifft, tilbert, itilbert, hilbert, + ihilbert, shift, fftfreq, cs_diff, sc_diff, + ss_diff, cc_diff) + +import numpy as np +from numpy import arange, sin, cos, pi, exp, tanh, sum, sign +from numpy.random import random + + +def direct_diff(x,k=1,period=None): + fx = fft(x) + n = len(fx) + if period is None: + period = 2*pi + w = fftfreq(n)*2j*pi/period*n + if k < 0: + w = 1 / w**k + w[0] = 0.0 + else: + w = w**k + if n > 2000: + w[250:n-250] = 0.0 + return ifft(w*fx).real + + +def direct_tilbert(x,h=1,period=None): + fx = fft(x) + n = len(fx) + if period is None: + period = 2*pi + w = fftfreq(n)*h*2*pi/period*n + w[0] = 1 + w = 1j/tanh(w) + w[0] = 0j + return ifft(w*fx) + + +def direct_itilbert(x,h=1,period=None): + fx = fft(x) + n = len(fx) + if period is None: + period = 2*pi + w = fftfreq(n)*h*2*pi/period*n + w = -1j*tanh(w) + return ifft(w*fx) + + +def direct_hilbert(x): + fx = fft(x) + n = len(fx) + w = fftfreq(n)*n + w = 1j*sign(w) + return ifft(w*fx) + + +def direct_ihilbert(x): + return -direct_hilbert(x) + + +def direct_shift(x,a,period=None): + n = len(x) + if period is None: + k = fftfreq(n)*1j*n + else: + k = fftfreq(n)*2j*pi/period*n + return ifft(fft(x)*exp(k*a)).real + + +class TestDiff: + + def test_definition(self): + for n in [16,17,64,127,32]: + x = arange(n)*2*pi/n + assert_array_almost_equal(diff(sin(x)),direct_diff(sin(x))) + assert_array_almost_equal(diff(sin(x),2),direct_diff(sin(x),2)) + assert_array_almost_equal(diff(sin(x),3),direct_diff(sin(x),3)) + assert_array_almost_equal(diff(sin(x),4),direct_diff(sin(x),4)) + assert_array_almost_equal(diff(sin(x),5),direct_diff(sin(x),5)) + assert_array_almost_equal(diff(sin(2*x),3),direct_diff(sin(2*x),3)) + assert_array_almost_equal(diff(sin(2*x),4),direct_diff(sin(2*x),4)) + assert_array_almost_equal(diff(cos(x)),direct_diff(cos(x))) + assert_array_almost_equal(diff(cos(x),2),direct_diff(cos(x),2)) + assert_array_almost_equal(diff(cos(x),3),direct_diff(cos(x),3)) + assert_array_almost_equal(diff(cos(x),4),direct_diff(cos(x),4)) + assert_array_almost_equal(diff(cos(2*x)),direct_diff(cos(2*x))) + 
assert_array_almost_equal(diff(sin(x*n/8)),direct_diff(sin(x*n/8))) + assert_array_almost_equal(diff(cos(x*n/8)),direct_diff(cos(x*n/8))) + for k in range(5): + assert_array_almost_equal(diff(sin(4*x),k),direct_diff(sin(4*x),k)) + assert_array_almost_equal(diff(cos(4*x),k),direct_diff(cos(4*x),k)) + + def test_period(self): + for n in [17,64]: + x = arange(n)/float(n) + assert_array_almost_equal(diff(sin(2*pi*x),period=1), + 2*pi*cos(2*pi*x)) + assert_array_almost_equal(diff(sin(2*pi*x),3,period=1), + -(2*pi)**3*cos(2*pi*x)) + + def test_sin(self): + for n in [32,64,77]: + x = arange(n)*2*pi/n + assert_array_almost_equal(diff(sin(x)),cos(x)) + assert_array_almost_equal(diff(cos(x)),-sin(x)) + assert_array_almost_equal(diff(sin(x),2),-sin(x)) + assert_array_almost_equal(diff(sin(x),4),sin(x)) + assert_array_almost_equal(diff(sin(4*x)),4*cos(4*x)) + assert_array_almost_equal(diff(sin(sin(x))),cos(x)*cos(sin(x))) + + def test_expr(self): + for n in [64,77,100,128,256,512,1024,2048,4096,8192][:5]: + x = arange(n)*2*pi/n + f = sin(x)*cos(4*x)+exp(sin(3*x)) + df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x)) + ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\ + - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x)) + d1 = diff(f) + assert_array_almost_equal(d1,df) + assert_array_almost_equal(diff(df),ddf) + assert_array_almost_equal(diff(f,2),ddf) + assert_array_almost_equal(diff(ddf,-1),df) + + def test_expr_large(self): + for n in [2048,4096]: + x = arange(n)*2*pi/n + f = sin(x)*cos(4*x)+exp(sin(3*x)) + df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x)) + ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\ + - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x)) + assert_array_almost_equal(diff(f),df) + assert_array_almost_equal(diff(df),ddf) + assert_array_almost_equal(diff(ddf,-1),df) + assert_array_almost_equal(diff(f,2),ddf) + + def test_int(self): + n = 64 + x = arange(n)*2*pi/n + assert_array_almost_equal(diff(sin(x),-1),-cos(x)) + assert_array_almost_equal(diff(sin(x),-2),-sin(x)) + assert_array_almost_equal(diff(sin(x),-4),sin(x)) + assert_array_almost_equal(diff(2*cos(2*x),-1),sin(2*x)) + + def test_random_even(self): + for k in [0,2,4,6]: + for n in [60,32,64,56,55]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + # zeroing Nyquist mode: + f = diff(diff(f,1),-1) + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(diff(diff(f,k),-k),f) + assert_array_almost_equal(diff(diff(f,-k),k),f) + + def test_random_odd(self): + for k in [0,1,2,3,4,5,6]: + for n in [33,65,55]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(diff(diff(f,k),-k),f) + assert_array_almost_equal(diff(diff(f,-k),k),f) + + def test_zero_nyquist(self): + for k in [0,1,2,3,4,5,6]: + for n in [32,33,64,56,55]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + # zeroing Nyquist mode: + f = diff(diff(f,1),-1) + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(diff(diff(f,k),-k),f) + assert_array_almost_equal(diff(diff(f,-k),k),f) + + +class TestTilbert: + + def test_definition(self): + for h in [0.1,0.5,1,5.5,10]: + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + y = tilbert(sin(x),h) + y1 = direct_tilbert(sin(x),h) + assert_array_almost_equal(y,y1) + assert_array_almost_equal(tilbert(sin(x),h), + direct_tilbert(sin(x),h)) + assert_array_almost_equal(tilbert(sin(2*x),h), + direct_tilbert(sin(2*x),h)) + + def test_random_even(self): + for h in [0.1,0.5,1,5.5,10]: + for n in [32,64,56]: + f = 
random((n,)) + af = sum(f,axis=0)/n + f = f-af + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(direct_tilbert(direct_itilbert(f,h),h),f) + + def test_random_odd(self): + for h in [0.1,0.5,1,5.5,10]: + for n in [33,65,55]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(itilbert(tilbert(f,h),h),f) + assert_array_almost_equal(tilbert(itilbert(f,h),h),f) + + +class TestITilbert: + + def test_definition(self): + for h in [0.1,0.5,1,5.5,10]: + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + y = itilbert(sin(x),h) + y1 = direct_itilbert(sin(x),h) + assert_array_almost_equal(y,y1) + assert_array_almost_equal(itilbert(sin(x),h), + direct_itilbert(sin(x),h)) + assert_array_almost_equal(itilbert(sin(2*x),h), + direct_itilbert(sin(2*x),h)) + + +class TestHilbert: + + def test_definition(self): + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + y = hilbert(sin(x)) + y1 = direct_hilbert(sin(x)) + assert_array_almost_equal(y,y1) + assert_array_almost_equal(hilbert(sin(2*x)), + direct_hilbert(sin(2*x))) + + def test_tilbert_relation(self): + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + f = sin(x)+cos(2*x)*sin(x) + y = hilbert(f) + y1 = direct_hilbert(f) + assert_array_almost_equal(y,y1) + y2 = tilbert(f,h=10) + assert_array_almost_equal(y,y2) + + def test_random_odd(self): + for n in [33,65,55]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(ihilbert(hilbert(f)),f) + assert_array_almost_equal(hilbert(ihilbert(f)),f) + + def test_random_even(self): + for n in [32,64,56]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + # zeroing Nyquist mode: + f = diff(diff(f,1),-1) + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(direct_hilbert(direct_ihilbert(f)),f) + assert_array_almost_equal(hilbert(ihilbert(f)),f) + + +class TestIHilbert: + + def test_definition(self): + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + y = ihilbert(sin(x)) + y1 = direct_ihilbert(sin(x)) + assert_array_almost_equal(y,y1) + assert_array_almost_equal(ihilbert(sin(2*x)), + direct_ihilbert(sin(2*x))) + + def test_itilbert_relation(self): + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + f = sin(x)+cos(2*x)*sin(x) + y = ihilbert(f) + y1 = direct_ihilbert(f) + assert_array_almost_equal(y,y1) + y2 = itilbert(f,h=10) + assert_array_almost_equal(y,y2) + + +class TestShift: + + def test_definition(self): + for n in [18,17,64,127,32,2048,256]: + x = arange(n)*2*pi/n + for a in [0.1,3]: + assert_array_almost_equal(shift(sin(x),a),direct_shift(sin(x),a)) + assert_array_almost_equal(shift(sin(x),a),sin(x+a)) + assert_array_almost_equal(shift(cos(x),a),cos(x+a)) + assert_array_almost_equal(shift(cos(2*x)+sin(x),a), + cos(2*(x+a))+sin(x+a)) + assert_array_almost_equal(shift(exp(sin(x)),a),exp(sin(x+a))) + assert_array_almost_equal(shift(sin(x),2*pi),sin(x)) + assert_array_almost_equal(shift(sin(x),pi),-sin(x)) + assert_array_almost_equal(shift(sin(x),pi/2),cos(x)) + + +class TestOverwrite: + """Check input overwrite behavior """ + + real_dtypes = (np.float32, np.float64) + dtypes = real_dtypes + (np.complex64, np.complex128) + + def _check(self, x, routine, *args, **kwargs): + x2 = x.copy() + routine(x2, *args, **kwargs) + sig = routine.__name__ + if args: + sig += repr(args) + if kwargs: + sig += repr(kwargs) + assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) + + def _check_1d(self, routine, dtype, shape, *args, **kwargs): + 
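# Random input of the requested dtype and shape; the routines here + # are called without overwrite_x, so _check must find the input + # unmodified afterwards. +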
np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + data = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + data = np.random.randn(*shape) + data = data.astype(dtype) + self._check(data, routine, *args, **kwargs) + + def test_diff(self): + for dtype in self.dtypes: + self._check_1d(diff, dtype, (16,)) + + def test_tilbert(self): + for dtype in self.dtypes: + self._check_1d(tilbert, dtype, (16,), 1.6) + + def test_itilbert(self): + for dtype in self.dtypes: + self._check_1d(itilbert, dtype, (16,), 1.6) + + def test_hilbert(self): + for dtype in self.dtypes: + self._check_1d(hilbert, dtype, (16,)) + + def test_cs_diff(self): + for dtype in self.dtypes: + self._check_1d(cs_diff, dtype, (16,), 1.0, 4.0) + + def test_sc_diff(self): + for dtype in self.dtypes: + self._check_1d(sc_diff, dtype, (16,), 1.0, 4.0) + + def test_ss_diff(self): + for dtype in self.dtypes: + self._check_1d(ss_diff, dtype, (16,), 1.0, 4.0) + + def test_cc_diff(self): + for dtype in self.dtypes: + self._check_1d(cc_diff, dtype, (16,), 1.0, 4.0) + + def test_shift(self): + for dtype in self.dtypes: + self._check_1d(shift, dtype, (16,), 1.0) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_real_transforms.py b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_real_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..6108d460c7864bdc5dd9425bddf93576fac5b39d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_real_transforms.py @@ -0,0 +1,815 @@ +from os.path import join, dirname + +import numpy as np +from numpy.testing import assert_array_almost_equal, assert_equal +import pytest +from pytest import raises as assert_raises + +from scipy.fftpack._realtransforms import ( + dct, idct, dst, idst, dctn, idctn, dstn, idstn) + +# Matlab reference data +MDATA = np.load(join(dirname(__file__), 'test.npz')) +X = [MDATA['x%d' % i] for i in range(8)] +Y = [MDATA['y%d' % i] for i in range(8)] + +# FFTW reference data: the data are organized as follows: +# * SIZES is an array containing all available sizes +# * for every type (1, 2, 3, 4) and every size, the array dct_type_size +# contains the output of the DCT applied to the input np.linspace(0, size-1, +# size) +FFTWDATA_DOUBLE = np.load(join(dirname(__file__), 'fftw_double_ref.npz')) +FFTWDATA_SINGLE = np.load(join(dirname(__file__), 'fftw_single_ref.npz')) +FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes'] + + +def fftw_dct_ref(type, size, dt): + x = np.linspace(0, size-1, size).astype(dt) + dt = np.result_type(np.float32, dt) + if dt == np.float64: + data = FFTWDATA_DOUBLE + elif dt == np.float32: + data = FFTWDATA_SINGLE + else: + raise ValueError() + y = (data['dct_%d_%d' % (type, size)]).astype(dt) + return x, y, dt + + +def fftw_dst_ref(type, size, dt): + x = np.linspace(0, size-1, size).astype(dt) + dt = np.result_type(np.float32, dt) + if dt == np.float64: + data = FFTWDATA_DOUBLE + elif dt == np.float32: + data = FFTWDATA_SINGLE + else: + raise ValueError() + y = (data['dst_%d_%d' % (type, size)]).astype(dt) + return x, y, dt + + +def dct_2d_ref(x, **kwargs): + """Calculate reference values for testing dct2.""" + x = np.array(x, copy=True) + for row in range(x.shape[0]): + x[row, :] = dct(x[row, :], **kwargs) + for col in range(x.shape[1]): + x[:, col] = dct(x[:, col], **kwargs) + return x + + +def idct_2d_ref(x, **kwargs): + """Calculate reference values for testing idct2.""" + x = np.array(x, copy=True) + for row in range(x.shape[0]): + x[row, 
:] = idct(x[row, :], **kwargs) + for col in range(x.shape[1]): + x[:, col] = idct(x[:, col], **kwargs) + return x + + +def dst_2d_ref(x, **kwargs): + """Calculate reference values for testing dst2.""" + x = np.array(x, copy=True) + for row in range(x.shape[0]): + x[row, :] = dst(x[row, :], **kwargs) + for col in range(x.shape[1]): + x[:, col] = dst(x[:, col], **kwargs) + return x + + +def idst_2d_ref(x, **kwargs): + """Calculate reference values for testing idst2.""" + x = np.array(x, copy=True) + for row in range(x.shape[0]): + x[row, :] = idst(x[row, :], **kwargs) + for col in range(x.shape[1]): + x[:, col] = idst(x[:, col], **kwargs) + return x + + +def naive_dct1(x, norm=None): + """Calculate textbook definition version of DCT-I.""" + x = np.array(x, copy=True) + N = len(x) + M = N-1 + y = np.zeros(N) + m0, m = 1, 2 + if norm == 'ortho': + m0 = np.sqrt(1.0/M) + m = np.sqrt(2.0/M) + for k in range(N): + for n in range(1, N-1): + y[k] += m*x[n]*np.cos(np.pi*n*k/M) + y[k] += m0 * x[0] + y[k] += m0 * x[N-1] * (1 if k % 2 == 0 else -1) + if norm == 'ortho': + y[0] *= 1/np.sqrt(2) + y[N-1] *= 1/np.sqrt(2) + return y + + +def naive_dst1(x, norm=None): + """Calculate textbook definition version of DST-I.""" + x = np.array(x, copy=True) + N = len(x) + M = N+1 + y = np.zeros(N) + for k in range(N): + for n in range(N): + y[k] += 2*x[n]*np.sin(np.pi*(n+1.0)*(k+1.0)/M) + if norm == 'ortho': + y *= np.sqrt(0.5/M) + return y + + +def naive_dct4(x, norm=None): + """Calculate textbook definition version of DCT-IV.""" + x = np.array(x, copy=True) + N = len(x) + y = np.zeros(N) + for k in range(N): + for n in range(N): + y[k] += x[n]*np.cos(np.pi*(n+0.5)*(k+0.5)/(N)) + if norm == 'ortho': + y *= np.sqrt(2.0/N) + else: + y *= 2 + return y + + +def naive_dst4(x, norm=None): + """Calculate textbook definition version of DST-IV.""" + x = np.array(x, copy=True) + N = len(x) + y = np.zeros(N) + for k in range(N): + for n in range(N): + y[k] += x[n]*np.sin(np.pi*(n+0.5)*(k+0.5)/(N)) + if norm == 'ortho': + y *= np.sqrt(2.0/N) + else: + y *= 2 + return y + + +class TestComplex: + def test_dct_complex64(self): + y = dct(1j*np.arange(5, dtype=np.complex64)) + x = 1j*dct(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_dct_complex(self): + y = dct(np.arange(5)*1j) + x = 1j*dct(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_idct_complex(self): + y = idct(np.arange(5)*1j) + x = 1j*idct(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_dst_complex64(self): + y = dst(np.arange(5, dtype=np.complex64)*1j) + x = 1j*dst(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_dst_complex(self): + y = dst(np.arange(5)*1j) + x = 1j*dst(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_idst_complex(self): + y = idst(np.arange(5)*1j) + x = 1j*idst(np.arange(5)) + assert_array_almost_equal(x, y) + + +class _TestDCTBase: + def setup_method(self): + self.rdt = None + self.dec = 14 + self.type = None + + def test_definition(self): + for i in FFTWDATA_SIZES: + x, yr, dt = fftw_dct_ref(self.type, i, self.rdt) + y = dct(x, type=self.type) + assert_equal(y.dtype, dt) + # XXX: we divide by np.max(y) because the tests fail otherwise. We + # should really use something like assert_array_approx_equal. The + # difference is due to fftw using a better algorithm w.r.t error + # propagation compared to the ones from fftpack. 
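+ # Dividing both arrays by np.max(y) turns the decimal-based + # absolute comparison into a relative-error check against the + # FFTW reference data.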
+ assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec, + err_msg="Size %d failed" % i) + + def test_axis(self): + nt = 2 + for i in [7, 8, 9, 16, 32, 64]: + x = np.random.randn(nt, i) + y = dct(x, type=self.type) + for j in range(nt): + assert_array_almost_equal(y[j], dct(x[j], type=self.type), + decimal=self.dec) + + x = x.T + y = dct(x, axis=0, type=self.type) + for j in range(nt): + assert_array_almost_equal(y[:,j], dct(x[:,j], type=self.type), + decimal=self.dec) + + +class _TestDCTIBase(_TestDCTBase): + def test_definition_ortho(self): + # Test orthonormal mode. + dt = np.result_type(np.float32, self.rdt) + for xr in X: + x = np.array(xr, dtype=self.rdt) + y = dct(x, norm='ortho', type=1) + y2 = naive_dct1(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec) + +class _TestDCTIIBase(_TestDCTBase): + def test_definition_matlab(self): + # Test correspondence with MATLAB (orthonormal mode). + dt = np.result_type(np.float32, self.rdt) + for xr, yr in zip(X, Y): + x = np.array(xr, dtype=dt) + y = dct(x, norm="ortho", type=2) + assert_equal(y.dtype, dt) + assert_array_almost_equal(y, yr, decimal=self.dec) + + +class _TestDCTIIIBase(_TestDCTBase): + def test_definition_ortho(self): + # Test orthonormal mode. + dt = np.result_type(np.float32, self.rdt) + for xr in X: + x = np.array(xr, dtype=self.rdt) + y = dct(x, norm='ortho', type=2) + xi = dct(y, norm="ortho", type=3) + assert_equal(xi.dtype, dt) + assert_array_almost_equal(xi, x, decimal=self.dec) + +class _TestDCTIVBase(_TestDCTBase): + def test_definition_ortho(self): + # Test orthonormal mode. + dt = np.result_type(np.float32, self.rdt) + for xr in X: + x = np.array(xr, dtype=self.rdt) + y = dct(x, norm='ortho', type=4) + y2 = naive_dct4(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec) + + +class TestDCTIDouble(_TestDCTIBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 10 + self.type = 1 + + +class TestDCTIFloat(_TestDCTIBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 1 + + +class TestDCTIInt(_TestDCTIBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 1 + + +class TestDCTIIDouble(_TestDCTIIBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 10 + self.type = 2 + + +class TestDCTIIFloat(_TestDCTIIBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 2 + + +class TestDCTIIInt(_TestDCTIIBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 2 + + +class TestDCTIIIDouble(_TestDCTIIIBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 14 + self.type = 3 + + +class TestDCTIIIFloat(_TestDCTIIIBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 3 + + +class TestDCTIIIInt(_TestDCTIIIBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 3 + + +class TestDCTIVDouble(_TestDCTIVBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 12 + self.type = 4 + + +class TestDCTIVFloat(_TestDCTIVBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 4 + + +class TestDCTIVInt(_TestDCTIVBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 4 + + +class _TestIDCTBase: + def setup_method(self): + self.rdt = None + self.dec = 14 + self.type = None + + def test_definition(self): + for i in
FFTWDATA_SIZES: + xr, yr, dt = fftw_dct_ref(self.type, i, self.rdt) + x = idct(yr, type=self.type) + if self.type == 1: + x /= 2 * (i-1) + else: + x /= 2 * i + assert_equal(x.dtype, dt) + # XXX: we divide by np.max(x) because the tests fail otherwise. We + # should really use something like assert_array_approx_equal. The + # difference is due to fftw using a better algorithm w.r.t error + # propagation compared to the ones from fftpack. + assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec, + err_msg="Size %d failed" % i) + + +class TestIDCTIDouble(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 10 + self.type = 1 + + +class TestIDCTIFloat(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 1 + + +class TestIDCTIInt(_TestIDCTBase): + def setup_method(self): + self.rdt = int + self.dec = 4 + self.type = 1 + + +class TestIDCTIIDouble(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 10 + self.type = 2 + + +class TestIDCTIIFloat(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 2 + + +class TestIDCTIIInt(_TestIDCTBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 2 + + +class TestIDCTIIIDouble(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 14 + self.type = 3 + + +class TestIDCTIIIFloat(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 3 + + +class TestIDCTIIIInt(_TestIDCTBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 3 + +class TestIDCTIVDouble(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 12 + self.type = 4 + + +class TestIDCTIVFloat(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 4 + + +class TestIDCTIVInt(_TestIDCTBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 4 + +class _TestDSTBase: + def setup_method(self): + self.rdt = None # dtype + self.dec = None # number of decimals to match + self.type = None # dst type + + def test_definition(self): + for i in FFTWDATA_SIZES: + xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt) + y = dst(xr, type=self.type) + assert_equal(y.dtype, dt) + # XXX: we divide by np.max(y) because the tests fail otherwise. We + # should really use something like assert_array_approx_equal. The + # difference is due to fftw using a better algorithm w.r.t error + # propagation compared to the ones from fftpack. + assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec, + err_msg="Size %d failed" % i) + + +class _TestDSTIBase(_TestDSTBase): + def test_definition_ortho(self): + # Test orthonormal mode. + dt = np.result_type(np.float32, self.rdt) + for xr in X: + x = np.array(xr, dtype=self.rdt) + y = dst(x, norm='ortho', type=1) + y2 = naive_dst1(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec) + +class _TestDSTIVBase(_TestDSTBase): + def test_definition_ortho(self): + # Test orthonormal mode.
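+ # naive_dst4 above applies the same sqrt(2/N) scaling for + # norm='ortho', so the outputs can be compared elementwise.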
+ dt = np.result_type(np.float32, self.rdt) + for xr in X: + x = np.array(xr, dtype=self.rdt) + y = dst(x, norm='ortho', type=4) + y2 = naive_dst4(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_array_almost_equal(y, y2, decimal=self.dec) + +class TestDSTIDouble(_TestDSTIBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 12 + self.type = 1 + + +class TestDSTIFloat(_TestDSTIBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 1 + + +class TestDSTIInt(_TestDSTIBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 1 + + +class TestDSTIIDouble(_TestDSTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 14 + self.type = 2 + + +class TestDSTIIFloat(_TestDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 6 + self.type = 2 + + +class TestDSTIIInt(_TestDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 6 + self.type = 2 + + +class TestDSTIIIDouble(_TestDSTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 14 + self.type = 3 + + +class TestDSTIIIFloat(_TestDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 7 + self.type = 3 + + +class TestDSTIIIInt(_TestDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 7 + self.type = 3 + + +class TestDSTIVDouble(_TestDSTIVBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 12 + self.type = 4 + + +class TestDSTIVFloat(_TestDSTIVBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 4 + + +class TestDSTIVInt(_TestDSTIVBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 4 + + +class _TestIDSTBase: + def setup_method(self): + self.rdt = None + self.dec = None + self.type = None + + def test_definition(self): + for i in FFTWDATA_SIZES: + xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt) + x = idst(yr, type=self.type) + if self.type == 1: + x /= 2 * (i+1) + else: + x /= 2 * i + assert_equal(x.dtype, dt) + # XXX: we divide by np.max(x) because the tests fail otherwise. We + # should really use something like assert_array_approx_equal. The + # difference is due to fftw using a better algorithm w.r.t error + # propagation compared to the ones from fftpack. 
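+ # The division by 2*(i+1) (type 1) or 2*i above undoes the + # unnormalized fftpack round trip before comparing with the input.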
+ assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec, + err_msg="Size %d failed" % i) + + +class TestIDSTIDouble(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 12 + self.type = 1 + + +class TestIDSTIFloat(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 1 + + +class TestIDSTIInt(_TestIDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 4 + self.type = 1 + + +class TestIDSTIIDouble(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 14 + self.type = 2 + + +class TestIDSTIIFloat(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 6 + self.type = 2 + + +class TestIDSTIIInt(_TestIDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 6 + self.type = 2 + + +class TestIDSTIIIDouble(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 14 + self.type = 3 + + +class TestIDSTIIIFloat(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 6 + self.type = 3 + + +class TestIDSTIIIInt(_TestIDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 6 + self.type = 3 + + +class TestIDSTIVDouble(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 12 + self.type = 4 + + +class TestIDSTIVFloat(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 6 + self.type = 4 + + +class TestIDSTIVInt(_TestIDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 6 + self.type = 4 + + +class TestOverwrite: + """Check input overwrite behavior.""" + + real_dtypes = [np.float32, np.float64] + + def _check(self, x, routine, type, fftsize, axis, norm, overwrite_x, **kw): + x2 = x.copy() + routine(x2, type, fftsize, axis, norm, overwrite_x=overwrite_x) + + sig = "{}({}{!r}, {!r}, axis={!r}, overwrite_x={!r})".format( + routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x) + if not overwrite_x: + assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) + + def _check_1d(self, routine, dtype, shape, axis): + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + data = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + data = np.random.randn(*shape) + data = data.astype(dtype) + + for type in [1, 2, 3, 4]: + for overwrite_x in [True, False]: + for norm in [None, 'ortho']: + self._check(data, routine, type, None, axis, norm, + overwrite_x) + + def test_dct(self): + for dtype in self.real_dtypes: + self._check_1d(dct, dtype, (16,), -1) + self._check_1d(dct, dtype, (16, 2), 0) + self._check_1d(dct, dtype, (2, 16), 1) + + def test_idct(self): + for dtype in self.real_dtypes: + self._check_1d(idct, dtype, (16,), -1) + self._check_1d(idct, dtype, (16, 2), 0) + self._check_1d(idct, dtype, (2, 16), 1) + + def test_dst(self): + for dtype in self.real_dtypes: + self._check_1d(dst, dtype, (16,), -1) + self._check_1d(dst, dtype, (16, 2), 0) + self._check_1d(dst, dtype, (2, 16), 1) + + def test_idst(self): + for dtype in self.real_dtypes: + self._check_1d(idst, dtype, (16,), -1) + self._check_1d(idst, dtype, (16, 2), 0) + self._check_1d(idst, dtype, (2, 16), 1) + + +class Test_DCTN_IDCTN: + dec = 14 + dct_type = [1, 2, 3, 4] + norms = [None, 'ortho'] + rstate = np.random.RandomState(1234) + shape = (32, 16) + data = rstate.randn(*shape) + + @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + @pytest.mark.parametrize('axes', [None, + 1, (1,), [1], + 0, (0,), [0], + (0, 1), [0,
1], + (-2, -1), [-2, -1]]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', ['ortho']) + def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm): + tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm) + tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm) + assert_array_almost_equal(self.data, tmp, decimal=12) + + @pytest.mark.parametrize('fforward,fforward_ref', [(dctn, dct_2d_ref), + (dstn, dst_2d_ref)]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', norms) + def test_dctn_vs_2d_reference(self, fforward, fforward_ref, + dct_type, norm): + y1 = fforward(self.data, type=dct_type, axes=None, norm=norm) + y2 = fforward_ref(self.data, type=dct_type, norm=norm) + assert_array_almost_equal(y1, y2, decimal=11) + + @pytest.mark.parametrize('finverse,finverse_ref', [(idctn, idct_2d_ref), + (idstn, idst_2d_ref)]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', [None, 'ortho']) + def test_idctn_vs_2d_reference(self, finverse, finverse_ref, + dct_type, norm): + fdata = dctn(self.data, type=dct_type, norm=norm) + y1 = finverse(fdata, type=dct_type, norm=norm) + y2 = finverse_ref(fdata, type=dct_type, norm=norm) + assert_array_almost_equal(y1, y2, decimal=11) + + @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + def test_axes_and_shape(self, fforward, finverse): + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fforward(self.data, shape=self.data.shape[0], axes=(0, 1)) + + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fforward(self.data, shape=self.data.shape[0], axes=None) + + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fforward(self.data, shape=self.data.shape, axes=0) + + @pytest.mark.parametrize('fforward', [dctn, dstn]) + def test_shape(self, fforward): + tmp = fforward(self.data, shape=(128, 128), axes=None) + assert_equal(tmp.shape, (128, 128)) + + @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + @pytest.mark.parametrize('axes', [1, (1,), [1], + 0, (0,), [0]]) + def test_shape_is_none_with_axes(self, fforward, finverse, axes): + tmp = fforward(self.data, shape=None, axes=axes, norm='ortho') + tmp = finverse(tmp, shape=None, axes=axes, norm='ortho') + assert_array_almost_equal(self.data, tmp, decimal=self.dec) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/mmio.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/mmio.py new file mode 100644 index 0000000000000000000000000000000000000000..e03c62376c52de4824086624eb0fb072e07057aa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/mmio.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.io` namespace for importing the functions +# included below. 
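+# For example, replace `from scipy.io.mmio import mmread` with + # `from scipy.io import mmread`.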
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'mminfo', 'mmread', 'mmwrite', 'MMFile', + 'coo_matrix', 'asstr' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="io", module="mmio", + private_modules=["_mmio"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_2d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_2d.sav new file mode 100644 index 0000000000000000000000000000000000000000..804d8b1a8a90636c880e974b6f85bd385033306b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_2d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_4d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_4d.sav new file mode 100644 index 0000000000000000000000000000000000000000..4bb951e274a399f091ff70b639d6e3b55ee1e122 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_4d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_6d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_6d.sav new file mode 100644 index 0000000000000000000000000000000000000000..91588d348d5f89af354209840062202d5b28c1df Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_6d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_7d.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_7d.sav new file mode 100644 index 0000000000000000000000000000000000000000..995d23c6ed05b095442b6247b09191126f797f23 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_pointer_7d.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/example_1.nc b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/example_1.nc new file mode 100644 index 0000000000000000000000000000000000000000..5775622d0ef85828b436dffcd21366f7538fc55c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/example_1.nc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x7.dat b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x7.dat new file mode 100644 index 0000000000000000000000000000000000000000..0bd683096f18eadceb7168f811c75bf072baecfe Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/fortran-sf8-1x1x7.dat differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_byte.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_byte.sav new file mode 100644 index 0000000000000000000000000000000000000000..e4027b3cf302b8610b87d9ef8b0aac39d5a40ef9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_byte.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint32.sav 
b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint32.sav new file mode 100644 index 0000000000000000000000000000000000000000..74dec7b8933418d30d17c83d617443a73ceef0c6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/scalar_uint32.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays.sav new file mode 100644 index 0000000000000000000000000000000000000000..40c9cd330e0c731968d71dbbfeae9bd8c4a745a2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_arrays.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers_replicated.sav b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers_replicated.sav new file mode 100644 index 0000000000000000000000000000000000000000..d16f4655cc20318db2b0d629cd5ed6d7be01b518 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/struct_pointers_replicated.sav differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/test_fortran.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/test_fortran.py new file mode 100644 index 0000000000000000000000000000000000000000..905140764411ea0fabf760f2288c546f636f232a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/test_fortran.py @@ -0,0 +1,236 @@ +''' Tests for fortran sequential files ''' + +import tempfile +import shutil +from os import path +from glob import iglob +import re + +from numpy.testing import assert_equal, assert_allclose +import numpy as np +import pytest + +from scipy.io import (FortranFile, + _test_fortran, + FortranEOFError, + FortranFormattingError) + + +DATA_PATH = path.join(path.dirname(__file__), 'data') + + +def test_fortranfiles_read(): + for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")): + m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+).dat', filename, re.I) + if not m: + raise RuntimeError("Couldn't match %s filename to regex" % filename) + + dims = (int(m.group(2)), int(m.group(3)), int(m.group(4))) + + dtype = m.group(1).replace('s', '<') + + f = FortranFile(filename, 'r', ' 0] = 1 + info = (2, 2, 3, 'coordinate', 'pattern', 'general') + mmwrite(self.fn, a, field='pattern') + assert_equal(mminfo(self.fn), info) + b = mmread(self.fn) + assert_array_almost_equal(p, b.toarray()) + + def test_gh13634_non_skew_symmetric_int(self): + a = scipy.sparse.csr_matrix([[1, 2], [-2, 99]], dtype=np.int32) + self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general')) + + def test_gh13634_non_skew_symmetric_float(self): + a = scipy.sparse.csr_matrix([[1, 2], [-2, 99.]], dtype=np.float32) + self.check(a, (2, 2, 4, 'coordinate', 'real', 'general')) + + +_32bit_integer_dense_example = '''\ +%%MatrixMarket matrix array integer general +2 2 +2147483647 +2147483646 +2147483647 +2147483646 +''' + +_32bit_integer_sparse_example = '''\ +%%MatrixMarket matrix coordinate integer symmetric +2 2 2 +1 1 2147483647 +2 2 2147483646 +''' + +_64bit_integer_dense_example = '''\ +%%MatrixMarket matrix array integer general +2 2 + 2147483648 +-9223372036854775806 + -2147483648 + 9223372036854775807 +''' + +_64bit_integer_sparse_general_example = '''\ +%%MatrixMarket matrix coordinate integer general +2 2 3 +1 1 2147483648 +1 2 9223372036854775807 +2 2 9223372036854775807 +''' + +_64bit_integer_sparse_symmetric_example = '''\ 
+%%MatrixMarket matrix coordinate integer symmetric +2 2 3 +1 1 2147483648 +1 2 -9223372036854775807 +2 2 9223372036854775807 +''' + +_64bit_integer_sparse_skew_example = '''\ +%%MatrixMarket matrix coordinate integer skew-symmetric +2 2 3 +1 1 2147483648 +1 2 -9223372036854775807 +2 2 9223372036854775807 +''' + +_over64bit_integer_dense_example = '''\ +%%MatrixMarket matrix array integer general +2 2 + 2147483648 +9223372036854775807 + 2147483648 +9223372036854775808 +''' + +_over64bit_integer_sparse_example = '''\ +%%MatrixMarket matrix coordinate integer symmetric +2 2 2 +1 1 2147483648 +2 2 19223372036854775808 +''' + + +class TestMMIOReadLargeIntegers: + def setup_method(self): + self.tmpdir = mkdtemp() + self.fn = os.path.join(self.tmpdir, 'testfile.mtx') + + def teardown_method(self): + shutil.rmtree(self.tmpdir) + + def check_read(self, example, a, info, dense, over32, over64): + with open(self.fn, 'w') as f: + f.write(example) + assert_equal(mminfo(self.fn), info) + if ((over32 and (np.intp(0).itemsize < 8) and mmwrite == scipy.io._mmio.mmwrite) + or over64): + assert_raises(OverflowError, mmread, self.fn) + else: + b = mmread(self.fn) + if not dense: + b = b.toarray() + assert_equal(a, b) + + def test_read_32bit_integer_dense(self): + a = array([[2**31-1, 2**31-1], + [2**31-2, 2**31-2]], dtype=np.int64) + self.check_read(_32bit_integer_dense_example, + a, + (2, 2, 4, 'array', 'integer', 'general'), + dense=True, + over32=False, + over64=False) + + def test_read_32bit_integer_sparse(self): + a = array([[2**31-1, 0], + [0, 2**31-2]], dtype=np.int64) + self.check_read(_32bit_integer_sparse_example, + a, + (2, 2, 2, 'coordinate', 'integer', 'symmetric'), + dense=False, + over32=False, + over64=False) + + def test_read_64bit_integer_dense(self): + a = array([[2**31, -2**31], + [-2**63+2, 2**63-1]], dtype=np.int64) + self.check_read(_64bit_integer_dense_example, + a, + (2, 2, 4, 'array', 'integer', 'general'), + dense=True, + over32=True, + over64=False) + + def test_read_64bit_integer_sparse_general(self): + a = array([[2**31, 2**63-1], + [0, 2**63-1]], dtype=np.int64) + self.check_read(_64bit_integer_sparse_general_example, + a, + (2, 2, 3, 'coordinate', 'integer', 'general'), + dense=False, + over32=True, + over64=False) + + def test_read_64bit_integer_sparse_symmetric(self): + a = array([[2**31, -2**63+1], + [-2**63+1, 2**63-1]], dtype=np.int64) + self.check_read(_64bit_integer_sparse_symmetric_example, + a, + (2, 2, 3, 'coordinate', 'integer', 'symmetric'), + dense=False, + over32=True, + over64=False) + + def test_read_64bit_integer_sparse_skew(self): + a = array([[2**31, -2**63+1], + [2**63-1, 2**63-1]], dtype=np.int64) + self.check_read(_64bit_integer_sparse_skew_example, + a, + (2, 2, 3, 'coordinate', 'integer', 'skew-symmetric'), + dense=False, + over32=True, + over64=False) + + def test_read_over64bit_integer_dense(self): + self.check_read(_over64bit_integer_dense_example, + None, + (2, 2, 4, 'array', 'integer', 'general'), + dense=True, + over32=True, + over64=True) + + def test_read_over64bit_integer_sparse(self): + self.check_read(_over64bit_integer_sparse_example, + None, + (2, 2, 2, 'coordinate', 'integer', 'symmetric'), + dense=False, + over32=True, + over64=True) + + +_general_example = '''\ +%%MatrixMarket matrix coordinate real general +%================================================================================= +% +% This ASCII file represents a sparse MxN matrix with L +% nonzeros in the following Matrix Market format: +% +% 
+----------------------------------------------+ +% |%%MatrixMarket matrix coordinate real general | <--- header line +% |% | <--+ +% |% comments | |-- 0 or more comment lines +% |% | <--+ +% | M N L | <--- rows, columns, entries +% | I1 J1 A(I1, J1) | <--+ +% | I2 J2 A(I2, J2) | | +% | I3 J3 A(I3, J3) | |-- L lines +% | . . . | | +% | IL JL A(IL, JL) | <--+ +% +----------------------------------------------+ +% +% Indices are 1-based, i.e. A(1,1) is the first element. +% +%================================================================================= + 5 5 8 + 1 1 1.000e+00 + 2 2 1.050e+01 + 3 3 1.500e-02 + 1 4 6.000e+00 + 4 2 2.505e+02 + 4 4 -2.800e+02 + 4 5 3.332e+01 + 5 5 1.200e+01 +''' + +_hermitian_example = '''\ +%%MatrixMarket matrix coordinate complex hermitian + 5 5 7 + 1 1 1.0 0 + 2 2 10.5 0 + 4 2 250.5 22.22 + 3 3 1.5e-2 0 + 4 4 -2.8e2 0 + 5 5 12. 0 + 5 4 0 33.32 +''' + +_skew_example = '''\ +%%MatrixMarket matrix coordinate real skew-symmetric + 5 5 7 + 1 1 1.0 + 2 2 10.5 + 4 2 250.5 + 3 3 1.5e-2 + 4 4 -2.8e2 + 5 5 12. + 5 4 0 +''' + +_symmetric_example = '''\ +%%MatrixMarket matrix coordinate real symmetric + 5 5 7 + 1 1 1.0 + 2 2 10.5 + 4 2 250.5 + 3 3 1.5e-2 + 4 4 -2.8e2 + 5 5 12. + 5 4 8 +''' + +_symmetric_pattern_example = '''\ +%%MatrixMarket matrix coordinate pattern symmetric + 5 5 7 + 1 1 + 2 2 + 4 2 + 3 3 + 4 4 + 5 5 + 5 4 +''' + +# example (without comment lines) from Figure 1 in +# https://math.nist.gov/MatrixMarket/reports/MMformat.ps +_empty_lines_example = '''\ +%%MatrixMarket MATRIX Coordinate Real General + + 5 5 8 + +1 1 1.0 +2 2 10.5 +3 3 1.5e-2 +4 4 -2.8E2 +5 5 12. + 1 4 6 + 4 2 250.5 + 4 5 33.32 + +''' + + +class TestMMIOCoordinate: + def setup_method(self): + self.tmpdir = mkdtemp() + self.fn = os.path.join(self.tmpdir, 'testfile.mtx') + + def teardown_method(self): + shutil.rmtree(self.tmpdir) + + def check_read(self, example, a, info): + f = open(self.fn, 'w') + f.write(example) + f.close() + assert_equal(mminfo(self.fn), info) + b = mmread(self.fn).toarray() + assert_array_almost_equal(a, b) + + def test_read_general(self): + a = [[1, 0, 0, 6, 0], + [0, 10.5, 0, 0, 0], + [0, 0, .015, 0, 0], + [0, 250.5, 0, -280, 33.32], + [0, 0, 0, 0, 12]] + self.check_read(_general_example, a, + (5, 5, 8, 'coordinate', 'real', 'general')) + + def test_read_hermitian(self): + a = [[1, 0, 0, 0, 0], + [0, 10.5, 0, 250.5 - 22.22j, 0], + [0, 0, .015, 0, 0], + [0, 250.5 + 22.22j, 0, -280, -33.32j], + [0, 0, 0, 33.32j, 12]] + self.check_read(_hermitian_example, a, + (5, 5, 7, 'coordinate', 'complex', 'hermitian')) + + def test_read_skew(self): + a = [[1, 0, 0, 0, 0], + [0, 10.5, 0, -250.5, 0], + [0, 0, .015, 0, 0], + [0, 250.5, 0, -280, 0], + [0, 0, 0, 0, 12]] + self.check_read(_skew_example, a, + (5, 5, 7, 'coordinate', 'real', 'skew-symmetric')) + + def test_read_symmetric(self): + a = [[1, 0, 0, 0, 0], + [0, 10.5, 0, 250.5, 0], + [0, 0, .015, 0, 0], + [0, 250.5, 0, -280, 8], + [0, 0, 0, 8, 12]] + self.check_read(_symmetric_example, a, + (5, 5, 7, 'coordinate', 'real', 'symmetric')) + + def test_read_symmetric_pattern(self): + a = [[1, 0, 0, 0, 0], + [0, 1, 0, 1, 0], + [0, 0, 1, 0, 0], + [0, 1, 0, 1, 1], + [0, 0, 0, 1, 1]] + self.check_read(_symmetric_pattern_example, a, + (5, 5, 7, 'coordinate', 'pattern', 'symmetric')) + + def test_read_empty_lines(self): + a = [[1, 0, 0, 6, 0], + [0, 10.5, 0, 0, 0], + [0, 0, .015, 0, 0], + [0, 250.5, 0, -280, 33.32], + [0, 0, 0, 0, 12]] + self.check_read(_empty_lines_example, a, + (5, 5, 8, 'coordinate', 'real', 'general')) + + def 
test_empty_write_read(self): + # https://github.com/scipy/scipy/issues/1410 (Trac #883) + + b = scipy.sparse.coo_matrix((10, 10)) + mmwrite(self.fn, b) + + assert_equal(mminfo(self.fn), + (10, 10, 0, 'coordinate', 'real', 'symmetric')) + a = b.toarray() + b = mmread(self.fn).toarray() + assert_array_almost_equal(a, b) + + def test_bzip2_py3(self): + # test if fix for #2152 works + try: + # bz2 module isn't always built when building Python. + import bz2 + except ImportError: + return + I = array([0, 0, 1, 2, 3, 3, 3, 4]) + J = array([0, 3, 1, 2, 1, 3, 4, 4]) + V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0]) + + b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)) + + mmwrite(self.fn, b) + + fn_bzip2 = "%s.bz2" % self.fn + with open(self.fn, 'rb') as f_in: + f_out = bz2.BZ2File(fn_bzip2, 'wb') + f_out.write(f_in.read()) + f_out.close() + + a = mmread(fn_bzip2).toarray() + assert_array_almost_equal(a, b.toarray()) + + def test_gzip_py3(self): + # test if fix for #2152 works + try: + # gzip module can be missing from Python installation + import gzip + except ImportError: + return + I = array([0, 0, 1, 2, 3, 3, 3, 4]) + J = array([0, 3, 1, 2, 1, 3, 4, 4]) + V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0]) + + b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)) + + mmwrite(self.fn, b) + + fn_gzip = "%s.gz" % self.fn + with open(self.fn, 'rb') as f_in: + f_out = gzip.open(fn_gzip, 'wb') + f_out.write(f_in.read()) + f_out.close() + + a = mmread(fn_gzip).toarray() + assert_array_almost_equal(a, b.toarray()) + + def test_real_write_read(self): + I = array([0, 0, 1, 2, 3, 3, 3, 4]) + J = array([0, 3, 1, 2, 1, 3, 4, 4]) + V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0]) + + b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)) + + mmwrite(self.fn, b) + + assert_equal(mminfo(self.fn), + (5, 5, 8, 'coordinate', 'real', 'general')) + a = b.toarray() + b = mmread(self.fn).toarray() + assert_array_almost_equal(a, b) + + def test_complex_write_read(self): + I = array([0, 0, 1, 2, 3, 3, 3, 4]) + J = array([0, 3, 1, 2, 1, 3, 4, 4]) + V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j, + 250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j]) + + b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)) + + mmwrite(self.fn, b) + + assert_equal(mminfo(self.fn), + (5, 5, 8, 'coordinate', 'complex', 'general')) + a = b.toarray() + b = mmread(self.fn).toarray() + assert_array_almost_equal(a, b) + + def test_sparse_formats(self, tmp_path): + # Note: `tmp_path` is a pytest fixture, it handles cleanup + tmpdir = tmp_path / 'sparse_formats' + tmpdir.mkdir() + + mats = [] + I = array([0, 0, 1, 2, 3, 3, 3, 4]) + J = array([0, 3, 1, 2, 1, 3, 4, 4]) + + V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0]) + mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))) + + V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j, + 250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j]) + mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))) + + for mat in mats: + expected = mat.toarray() + for fmt in ['csr', 'csc', 'coo']: + fname = tmpdir / (fmt + '.mtx') + mmwrite(fname, mat.asformat(fmt)) + result = mmread(fname).toarray() + assert_array_almost_equal(result, expected) + + def test_precision(self): + test_values = [pi] + [10**(i) for i in range(0, -10, -1)] + test_precisions = range(1, 10) + for value in test_values: + for precision in test_precisions: + # construct sparse matrix with test value at last main diagonal + n = 10**precision + 1 + A 
= scipy.sparse.dok_matrix((n, n)) + A[n-1, n-1] = value + # write matrix with test precision and read again + mmwrite(self.fn, A, precision=precision) + A = scipy.io.mmread(self.fn) + # check for right entries in matrix + assert_array_equal(A.row, [n-1]) + assert_array_equal(A.col, [n-1]) + assert_allclose(A.data, [float('%%.%dg' % precision % value)]) + + def test_bad_number_of_coordinate_header_fields(self): + s = """\ + %%MatrixMarket matrix coordinate real general + 5 5 8 999 + 1 1 1.000e+00 + 2 2 1.050e+01 + 3 3 1.500e-02 + 1 4 6.000e+00 + 4 2 2.505e+02 + 4 4 -2.800e+02 + 4 5 3.332e+01 + 5 5 1.200e+01 + """ + text = textwrap.dedent(s).encode('ascii') + with pytest.raises(ValueError, match='not of length 3'): + scipy.io.mmread(io.BytesIO(text)) + + +def test_gh11389(): + mmread(io.StringIO("%%MatrixMarket matrix coordinate complex symmetric\n" + " 1 1 1\n" + "1 1 -2.1846000000000e+02 0.0000000000000e+00")) + + +def test_gh18123(tmp_path): + lines = [" %%MatrixMarket matrix coordinate real general\n", + "5 5 3\n", + "2 3 1.0\n", + "3 4 2.0\n", + "3 5 3.0\n"] + test_file = tmp_path / "test.mtx" + with open(test_file, "w") as f: + f.writelines(lines) + mmread(test_file) + + +def test_threadpoolctl(): + try: + import threadpoolctl + if not hasattr(threadpoolctl, "register"): + pytest.skip("threadpoolctl too old") + return + except ImportError: + pytest.skip("no threadpoolctl") + return + + with threadpoolctl.threadpool_limits(limits=4): + assert_equal(fmm.PARALLELISM, 4) + + with threadpoolctl.threadpool_limits(limits=2, user_api='scipy'): + assert_equal(fmm.PARALLELISM, 2) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/test_netcdf.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/test_netcdf.py new file mode 100644 index 0000000000000000000000000000000000000000..0dcf7a71ebd296b9d1114b53091837eee3499db2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/test_netcdf.py @@ -0,0 +1,546 @@ +''' Tests for netcdf ''' +import os +from os.path import join as pjoin, dirname +import shutil +import tempfile +import warnings +from io import BytesIO +from glob import glob +from contextlib import contextmanager + +import numpy as np +from numpy.testing import (assert_, assert_allclose, assert_equal, + break_cycles, suppress_warnings, IS_PYPY) +from pytest import raises as assert_raises + +from scipy.io import netcdf_file +from scipy._lib._tmpdirs import in_tempdir + +TEST_DATA_PATH = pjoin(dirname(__file__), 'data') + +N_EG_ELS = 11 # number of elements for example variable +VARTYPE_EG = 'b' # var type for example variable + + +@contextmanager +def make_simple(*args, **kwargs): + f = netcdf_file(*args, **kwargs) + f.history = 'Created for a test' + f.createDimension('time', N_EG_ELS) + time = f.createVariable('time', VARTYPE_EG, ('time',)) + time[:] = np.arange(N_EG_ELS) + time.units = 'days since 2008-01-01' + f.flush() + yield f + f.close() + + +def check_simple(ncfileobj): + '''Example fileobj tests ''' + assert_equal(ncfileobj.history, b'Created for a test') + time = ncfileobj.variables['time'] + assert_equal(time.units, b'days since 2008-01-01') + assert_equal(time.shape, (N_EG_ELS,)) + assert_equal(time[-1], N_EG_ELS-1) + +def assert_mask_matches(arr, expected_mask): + ''' + Asserts that the mask of arr is effectively the same as expected_mask. + + In contrast to numpy.ma.testutils.assert_mask_equal, this function allows + testing the 'mask' of a standard numpy array (the mask in this case is treated + as all False). 
+ + Parameters + ---------- + arr : ndarray or MaskedArray + Array to test. + expected_mask : array_like of booleans + A list giving the expected mask. + ''' + + mask = np.ma.getmaskarray(arr) + assert_equal(mask, expected_mask) + + +def test_read_write_files(): + # test round trip for example file + cwd = os.getcwd() + try: + tmpdir = tempfile.mkdtemp() + os.chdir(tmpdir) + with make_simple('simple.nc', 'w') as f: + pass + # read the file we just created in 'a' mode + with netcdf_file('simple.nc', 'a') as f: + check_simple(f) + # add something + f._attributes['appendRan'] = 1 + + # To read the NetCDF file we just created:: + with netcdf_file('simple.nc') as f: + # Using mmap is the default (but not on pypy) + assert_equal(f.use_mmap, not IS_PYPY) + check_simple(f) + assert_equal(f._attributes['appendRan'], 1) + + # Read it in append (and check mmap is off) + with netcdf_file('simple.nc', 'a') as f: + assert_(not f.use_mmap) + check_simple(f) + assert_equal(f._attributes['appendRan'], 1) + + # Now without mmap + with netcdf_file('simple.nc', mmap=False) as f: + # Using mmap is the default + assert_(not f.use_mmap) + check_simple(f) + + # To read the NetCDF file we just created, as file object, no + # mmap. When n * n_bytes(var_type) is not divisible by 4, this + # raised an error in pupynere 1.0.12 and scipy rev 5893, because + # calculated vsize was rounding up in units of 4 - see + # https://www.unidata.ucar.edu/software/netcdf/guide_toc.html + with open('simple.nc', 'rb') as fobj: + with netcdf_file(fobj) as f: + # by default, don't use mmap for file-like + assert_(not f.use_mmap) + check_simple(f) + + # Read file from fileobj, with mmap + with suppress_warnings() as sup: + if IS_PYPY: + sup.filter(RuntimeWarning, + "Cannot close a netcdf_file opened with mmap=True.*") + with open('simple.nc', 'rb') as fobj: + with netcdf_file(fobj, mmap=True) as f: + assert_(f.use_mmap) + check_simple(f) + + # Again read it in append mode (adding another att) + with open('simple.nc', 'r+b') as fobj: + with netcdf_file(fobj, 'a') as f: + assert_(not f.use_mmap) + check_simple(f) + f.createDimension('app_dim', 1) + var = f.createVariable('app_var', 'i', ('app_dim',)) + var[:] = 42 + + # And... check that app_var made it in... + with netcdf_file('simple.nc') as f: + check_simple(f) + assert_equal(f.variables['app_var'][:], 42) + + finally: + if IS_PYPY: + # windows cannot remove a dead file held by a mmap + # that has not been collected in PyPy + break_cycles() + break_cycles() + os.chdir(cwd) + shutil.rmtree(tmpdir) + + +def test_read_write_sio(): + eg_sio1 = BytesIO() + with make_simple(eg_sio1, 'w'): + str_val = eg_sio1.getvalue() + + eg_sio2 = BytesIO(str_val) + with netcdf_file(eg_sio2) as f2: + check_simple(f2) + + # Test that error is raised if attempting mmap for sio + eg_sio3 = BytesIO(str_val) + assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True) + # Test 64-bit offset write / read + eg_sio_64 = BytesIO() + with make_simple(eg_sio_64, 'w', version=2) as f_64: + str_val = eg_sio_64.getvalue() + + eg_sio_64 = BytesIO(str_val) + with netcdf_file(eg_sio_64) as f_64: + check_simple(f_64) + assert_equal(f_64.version_byte, 2) + # also when version 2 explicitly specified + eg_sio_64 = BytesIO(str_val) + with netcdf_file(eg_sio_64, version=2) as f_64: + check_simple(f_64) + assert_equal(f_64.version_byte, 2) + + +def test_bytes(): + raw_file = BytesIO() + f = netcdf_file(raw_file, mode='w') + # Dataset only has a single variable, dimension and attribute to avoid + # any ambiguity related to order.
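+ # For orientation, a rough (unofficial) map of the classic CDF-1 + # stream that the 'expected' bytes below spell out: the magic + # 'CDF\x01' and numrecs, then the dimension list (tag 0x0a), the + # global attribute list (tag 0x0c) and the variable list (tag 0x0b); + # the int16 value -9999 is stored big-endian as 0xd8f1 and padded to + # a 4-byte boundary with the NC_SHORT fill bytes 0x80 0x01.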
+ f.a = 'b' + f.createDimension('dim', 1) + var = f.createVariable('var', np.int16, ('dim',)) + var[0] = -9999 + var.c = 'd' + f.sync() + + actual = raw_file.getvalue() + + expected = (b'CDF\x01' + b'\x00\x00\x00\x00' + b'\x00\x00\x00\x0a' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x03' + b'dim\x00' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x0c' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x01' + b'a\x00\x00\x00' + b'\x00\x00\x00\x02' + b'\x00\x00\x00\x01' + b'b\x00\x00\x00' + b'\x00\x00\x00\x0b' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x03' + b'var\x00' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x00' + b'\x00\x00\x00\x0c' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x01' + b'c\x00\x00\x00' + b'\x00\x00\x00\x02' + b'\x00\x00\x00\x01' + b'd\x00\x00\x00' + b'\x00\x00\x00\x03' + b'\x00\x00\x00\x04' + b'\x00\x00\x00\x78' + b'\xd8\xf1\x80\x01') + + assert_equal(actual, expected) + + +def test_encoded_fill_value(): + with netcdf_file(BytesIO(), mode='w') as f: + f.createDimension('x', 1) + var = f.createVariable('var', 'S1', ('x',)) + assert_equal(var._get_encoded_fill_value(), b'\x00') + var._FillValue = b'\x01' + assert_equal(var._get_encoded_fill_value(), b'\x01') + var._FillValue = b'\x00\x00' # invalid, wrong size + assert_equal(var._get_encoded_fill_value(), b'\x00') + + +def test_read_example_data(): + # read any example data files + for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')): + with netcdf_file(fname, 'r'): + pass + with netcdf_file(fname, 'r', mmap=False): + pass + + +def test_itemset_no_segfault_on_readonly(): + # Regression test for ticket #1202. + # Open the test file in read-only mode. + + filename = pjoin(TEST_DATA_PATH, 'example_1.nc') + with suppress_warnings() as sup: + message = ("Cannot close a netcdf_file opened with mmap=True, when " + "netcdf_variables or arrays referring to its data still exist") + sup.filter(RuntimeWarning, message) + with netcdf_file(filename, 'r', mmap=True) as f: + time_var = f.variables['time'] + + # time_var.assignValue(42) should raise a RuntimeError--not seg. fault! + assert_raises(RuntimeError, time_var.assignValue, 42) + + +def test_appending_issue_gh_8625(): + stream = BytesIO() + + with make_simple(stream, mode='w') as f: + f.createDimension('x', 2) + f.createVariable('x', float, ('x',)) + f.variables['x'][...] = 1 + f.flush() + contents = stream.getvalue() + + stream = BytesIO(contents) + with netcdf_file(stream, mode='a') as f: + f.variables['x'][...] = 2 + + +def test_write_invalid_dtype(): + dtypes = ['int64', 'uint64'] + if np.dtype('int').itemsize == 8: # 64-bit machines + dtypes.append('int') + if np.dtype('uint').itemsize == 8: # 64-bit machines + dtypes.append('uint') + + with netcdf_file(BytesIO(), 'w') as f: + f.createDimension('time', N_EG_ELS) + for dt in dtypes: + assert_raises(ValueError, f.createVariable, 'time', dt, ('time',)) + + +def test_flush_rewind(): + stream = BytesIO() + with make_simple(stream, mode='w') as f: + f.createDimension('x',4) # x is used in createVariable + v = f.createVariable('v', 'i2', ['x']) + v[:] = 1 + f.flush() + len_single = len(stream.getvalue()) + f.flush() + len_double = len(stream.getvalue()) + + assert_(len_single == len_double) + + +def test_dtype_specifiers(): + # Numpy 1.7.0-dev had a bug where 'i2' wouldn't work. + # Specifying np.int16 or similar only works from the same commit as this + # comment was made.
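+ # (For reference: all three specifiers below should name the same + # type, i.e. np.dtype('i2') == np.dtype(np.int16) + # == np.dtype(np.dtype(np.int16)).)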
+ with make_simple(BytesIO(), mode='w') as f: + f.createDimension('x',4) + f.createVariable('v1', 'i2', ['x']) + f.createVariable('v2', np.int16, ['x']) + f.createVariable('v3', np.dtype(np.int16), ['x']) + + +def test_ticket_1720(): + io = BytesIO() + + items = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9] + + with netcdf_file(io, 'w') as f: + f.history = 'Created for a test' + f.createDimension('float_var', 10) + float_var = f.createVariable('float_var', 'f', ('float_var',)) + float_var[:] = items + float_var.units = 'metres' + f.flush() + contents = io.getvalue() + + io = BytesIO(contents) + with netcdf_file(io, 'r') as f: + assert_equal(f.history, b'Created for a test') + float_var = f.variables['float_var'] + assert_equal(float_var.units, b'metres') + assert_equal(float_var.shape, (10,)) + assert_allclose(float_var[:], items) + + +def test_mmaps_segfault(): + filename = pjoin(TEST_DATA_PATH, 'example_1.nc') + + if not IS_PYPY: + with warnings.catch_warnings(): + warnings.simplefilter("error") + with netcdf_file(filename, mmap=True) as f: + x = f.variables['lat'][:] + # should not raise warnings + del x + + def doit(): + with netcdf_file(filename, mmap=True) as f: + return f.variables['lat'][:] + + # should not crash + with suppress_warnings() as sup: + message = ("Cannot close a netcdf_file opened with mmap=True, when " + "netcdf_variables or arrays referring to its data still exist") + sup.filter(RuntimeWarning, message) + x = doit() + x.sum() + + +def test_zero_dimensional_var(): + io = BytesIO() + with make_simple(io, 'w') as f: + v = f.createVariable('zerodim', 'i2', []) + # This is checking that .isrec returns a boolean - don't simplify it + # to 'assert not ...' + assert v.isrec is False, v.isrec + f.flush() + + +def test_byte_gatts(): + # Check that global "string" atts work like they did before py3k + # unicode and general bytes confusion + with in_tempdir(): + filename = 'g_byte_atts.nc' + f = netcdf_file(filename, 'w') + f._attributes['holy'] = b'grail' + f._attributes['witch'] = 'floats' + f.close() + f = netcdf_file(filename, 'r') + assert_equal(f._attributes['holy'], b'grail') + assert_equal(f._attributes['witch'], b'floats') + f.close() + + +def test_open_append(): + # open 'w' put one attr + with in_tempdir(): + filename = 'append_dat.nc' + f = netcdf_file(filename, 'w') + f._attributes['Kilroy'] = 'was here' + f.close() + + # open again in 'a', read the att and a new one + f = netcdf_file(filename, 'a') + assert_equal(f._attributes['Kilroy'], b'was here') + f._attributes['naughty'] = b'Zoot' + f.close() + + # open yet again in 'r' and check both atts + f = netcdf_file(filename, 'r') + assert_equal(f._attributes['Kilroy'], b'was here') + assert_equal(f._attributes['naughty'], b'Zoot') + f.close() + + +def test_append_recordDimension(): + dataSize = 100 + + with in_tempdir(): + # Create file with record time dimension + with netcdf_file('withRecordDimension.nc', 'w') as f: + f.createDimension('time', None) + f.createVariable('time', 'd', ('time',)) + f.createDimension('x', dataSize) + x = f.createVariable('x', 'd', ('x',)) + x[:] = np.array(range(dataSize)) + f.createDimension('y', dataSize) + y = f.createVariable('y', 'd', ('y',)) + y[:] = np.array(range(dataSize)) + f.createVariable('testData', 'i', ('time', 'x', 'y')) + f.flush() + f.close() + + for i in range(2): + # Open the file in append mode and add data + with netcdf_file('withRecordDimension.nc', 'a') as f: + f.variables['time'].data = np.append(f.variables["time"].data, i) + f.variables['testData'][i, :, :] = 
np.full((dataSize, dataSize), i) + f.flush() + + # Read the file and check that append worked + with netcdf_file('withRecordDimension.nc') as f: + assert_equal(f.variables['time'][-1], i) + assert_equal(f.variables['testData'][-1, :, :].copy(), + np.full((dataSize, dataSize), i)) + assert_equal(f.variables['time'].data.shape[0], i+1) + assert_equal(f.variables['testData'].data.shape[0], i+1) + + # Read the file and check that 'data' was not saved as user defined + # attribute of testData variable during append operation + with netcdf_file('withRecordDimension.nc') as f: + with assert_raises(KeyError) as ar: + f.variables['testData']._attributes['data'] + ex = ar.value + assert_equal(ex.args[0], 'data') + +def test_maskandscale(): + t = np.linspace(20, 30, 15) + t[3] = 100 + tm = np.ma.masked_greater(t, 99) + fname = pjoin(TEST_DATA_PATH, 'example_2.nc') + with netcdf_file(fname, maskandscale=True) as f: + Temp = f.variables['Temperature'] + assert_equal(Temp.missing_value, 9999) + assert_equal(Temp.add_offset, 20) + assert_equal(Temp.scale_factor, np.float32(0.01)) + found = Temp[:].compressed() + del Temp # Remove ref to mmap, so file can be closed. + expected = np.round(tm.compressed(), 2) + assert_allclose(found, expected) + + with in_tempdir(): + newfname = 'ms.nc' + f = netcdf_file(newfname, 'w', maskandscale=True) + f.createDimension('Temperature', len(tm)) + temp = f.createVariable('Temperature', 'i', ('Temperature',)) + temp.missing_value = 9999 + temp.scale_factor = 0.01 + temp.add_offset = 20 + temp[:] = tm + f.close() + + with netcdf_file(newfname, maskandscale=True) as f: + Temp = f.variables['Temperature'] + assert_equal(Temp.missing_value, 9999) + assert_equal(Temp.add_offset, 20) + assert_equal(Temp.scale_factor, np.float32(0.01)) + expected = np.round(tm.compressed(), 2) + found = Temp[:].compressed() + del Temp + assert_allclose(found, expected) + + +# ------------------------------------------------------------------------ +# Test reading with masked values (_FillValue / missing_value) +# ------------------------------------------------------------------------ + +def test_read_withValuesNearFillValue(): + # Regression test for ticket #5626 + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var1_fillval0'][:] + assert_mask_matches(vardata, [False, True, False]) + +def test_read_withNoFillValue(): + # For a variable with no fill value, reading data with maskandscale=True + # should return unmasked data + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var2_noFillval'][:] + assert_mask_matches(vardata, [False, False, False]) + assert_equal(vardata, [1,2,3]) + +def test_read_withFillValueAndMissingValue(): + # For a variable with both _FillValue and missing_value, the _FillValue + # should be used + IRRELEVANT_VALUE = 9999 + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var3_fillvalAndMissingValue'][:] + assert_mask_matches(vardata, [True, False, False]) + assert_equal(vardata, [IRRELEVANT_VALUE, 2, 3]) + +def test_read_withMissingValue(): + # For a variable with missing_value but not _FillValue, the missing_value + # should be used + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var4_missingValue'][:] + assert_mask_matches(vardata, 
[False, True, False]) + +def test_read_withFillValNaN(): + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var5_fillvalNaN'][:] + assert_mask_matches(vardata, [False, True, False]) + +def test_read_withChar(): + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var6_char'][:] + assert_mask_matches(vardata, [False, True, False]) + +def test_read_with2dVar(): + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var7_2d'][:] + assert_mask_matches(vardata, [[True, False], [False, False], [False, True]]) + +def test_read_withMaskAndScaleFalse(): + # If a variable has a _FillValue (or missing_value) attribute, but is read + # with maskandscale set to False, the result should be unmasked + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + # Open file with mmap=False to avoid problems with closing a mmap'ed file + # when arrays referring to its data still exist: + with netcdf_file(fname, maskandscale=False, mmap=False) as f: + vardata = f.variables['var3_fillvalAndMissingValue'][:] + assert_mask_matches(vardata, [False, False, False]) + assert_equal(vardata, [1, 2, 3]) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/test_paths.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/test_paths.py new file mode 100644 index 0000000000000000000000000000000000000000..84464e469b64579cf389dcea0fece7b0e9d36121 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/test_paths.py @@ -0,0 +1,93 @@ +""" +Ensure that we can use pathlib.Path objects in all relevant IO functions. 
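+ +Most tests below save with one flavor of path and load back with the +other, roughly (names as in the tests):: + + scipy.io.savemat(str(path), {'data': data}) # str path + mat_contents = scipy.io.loadmat(path) # pathlib.Path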
+""" +from pathlib import Path + +import numpy as np + +import scipy.io +import scipy.io.wavfile +from scipy._lib._tmpdirs import tempdir +import scipy.sparse + + +class TestPaths: + data = np.arange(5).astype(np.int64) + + def test_savemat(self): + with tempdir() as temp_dir: + path = Path(temp_dir) / 'data.mat' + scipy.io.savemat(path, {'data': self.data}) + assert path.is_file() + + def test_loadmat(self): + # Save data with string path, load with pathlib.Path + with tempdir() as temp_dir: + path = Path(temp_dir) / 'data.mat' + scipy.io.savemat(str(path), {'data': self.data}) + + mat_contents = scipy.io.loadmat(path) + assert (mat_contents['data'] == self.data).all() + + def test_whosmat(self): + # Save data with string path, load with pathlib.Path + with tempdir() as temp_dir: + path = Path(temp_dir) / 'data.mat' + scipy.io.savemat(str(path), {'data': self.data}) + + contents = scipy.io.whosmat(path) + assert contents[0] == ('data', (1, 5), 'int64') + + def test_readsav(self): + path = Path(__file__).parent / 'data/scalar_string.sav' + scipy.io.readsav(path) + + def test_hb_read(self): + # Save data with string path, load with pathlib.Path + with tempdir() as temp_dir: + data = scipy.sparse.csr_matrix(scipy.sparse.eye(3)) + path = Path(temp_dir) / 'data.hb' + scipy.io.hb_write(str(path), data) + + data_new = scipy.io.hb_read(path) + assert (data_new != data).nnz == 0 + + def test_hb_write(self): + with tempdir() as temp_dir: + data = scipy.sparse.csr_matrix(scipy.sparse.eye(3)) + path = Path(temp_dir) / 'data.hb' + scipy.io.hb_write(path, data) + assert path.is_file() + + def test_mmio_read(self): + # Save data with string path, load with pathlib.Path + with tempdir() as temp_dir: + data = scipy.sparse.csr_matrix(scipy.sparse.eye(3)) + path = Path(temp_dir) / 'data.mtx' + scipy.io.mmwrite(str(path), data) + + data_new = scipy.io.mmread(path) + assert (data_new != data).nnz == 0 + + def test_mmio_write(self): + with tempdir() as temp_dir: + data = scipy.sparse.csr_matrix(scipy.sparse.eye(3)) + path = Path(temp_dir) / 'data.mtx' + scipy.io.mmwrite(path, data) + + def test_netcdf_file(self): + path = Path(__file__).parent / 'data/example_1.nc' + scipy.io.netcdf_file(path) + + def test_wavfile_read(self): + path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav' + scipy.io.wavfile.read(path) + + def test_wavfile_write(self): + # Read from str path, write to Path + input_path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav' + rate, data = scipy.io.wavfile.read(str(input_path)) + + with tempdir() as temp_dir: + output_path = Path(temp_dir) / input_path.name + scipy.io.wavfile.write(output_path, rate, data) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/test_wavfile.py b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/test_wavfile.py new file mode 100644 index 0000000000000000000000000000000000000000..0a048fad1f981a5af6651ed2170fed3847749972 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/io/tests/test_wavfile.py @@ -0,0 +1,426 @@ +import os +import sys +from io import BytesIO + +import numpy as np +from numpy.testing import (assert_equal, assert_, assert_array_equal, + break_cycles, suppress_warnings, IS_PYPY) +import pytest +from pytest import raises, warns + +from scipy.io import wavfile + + +def datafile(fn): + return os.path.join(os.path.dirname(__file__), 'data', fn) + + +def test_read_1(): + # 32-bit PCM (which uses extensible format) + for mmap in [False, True]: + filename = 'test-44100Hz-le-1ch-4bytes.wav' + 
rate, data = wavfile.read(datafile(filename), mmap=mmap) + + assert_equal(rate, 44100) + assert_(np.issubdtype(data.dtype, np.int32)) + assert_equal(data.shape, (4410,)) + + del data + + +def test_read_2(): + # 8-bit unsigned PCM + for mmap in [False, True]: + filename = 'test-8000Hz-le-2ch-1byteu.wav' + rate, data = wavfile.read(datafile(filename), mmap=mmap) + + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.uint8)) + assert_equal(data.shape, (800, 2)) + + del data + + +def test_read_3(): + # Little-endian float + for mmap in [False, True]: + filename = 'test-44100Hz-2ch-32bit-float-le.wav' + rate, data = wavfile.read(datafile(filename), mmap=mmap) + + assert_equal(rate, 44100) + assert_(np.issubdtype(data.dtype, np.float32)) + assert_equal(data.shape, (441, 2)) + + del data + + +def test_read_4(): + # Contains unsupported 'PEAK' chunk + for mmap in [False, True]: + with suppress_warnings() as sup: + sup.filter(wavfile.WavFileWarning, + "Chunk .non-data. not understood, skipping it") + filename = 'test-48000Hz-2ch-64bit-float-le-wavex.wav' + rate, data = wavfile.read(datafile(filename), mmap=mmap) + + assert_equal(rate, 48000) + assert_(np.issubdtype(data.dtype, np.float64)) + assert_equal(data.shape, (480, 2)) + + del data + + +def test_read_5(): + # Big-endian float + for mmap in [False, True]: + filename = 'test-44100Hz-2ch-32bit-float-be.wav' + rate, data = wavfile.read(datafile(filename), mmap=mmap) + + assert_equal(rate, 44100) + assert_(np.issubdtype(data.dtype, np.float32)) + assert_(data.dtype.byteorder == '>' or (sys.byteorder == 'big' and + data.dtype.byteorder == '=')) + assert_equal(data.shape, (441, 2)) + + del data + + +def test_5_bit_odd_size_no_pad(): + # 5-bit, 1 B container, 5 channels, 9 samples, 45 B data chunk + # Generated by LTspice, which incorrectly omits pad byte, but should be + # readable anyway + for mmap in [False, True]: + filename = 'test-8000Hz-le-5ch-9S-5bit.wav' + rate, data = wavfile.read(datafile(filename), mmap=mmap) + + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.uint8)) + assert_equal(data.shape, (9, 5)) + + # 8-5 = 3 LSBits should be 0 + assert_equal(data & 0b00000111, 0) + + # Unsigned + assert_equal(data.max(), 0b11111000) # Highest possible + assert_equal(data[0, 0], 128) # Midpoint is 128 for <= 8-bit + assert_equal(data.min(), 0) # Lowest possible + + del data + + +def test_12_bit_even_size(): + # 12-bit, 2 B container, 4 channels, 9 samples, 72 B data chunk + # Generated by LTspice from 1 Vpk sine waves + for mmap in [False, True]: + filename = 'test-8000Hz-le-4ch-9S-12bit.wav' + rate, data = wavfile.read(datafile(filename), mmap=mmap) + + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.int16)) + assert_equal(data.shape, (9, 4)) + + # 16-12 = 4 LSBits should be 0 + assert_equal(data & 0b00000000_00001111, 0) + + # Signed + assert_equal(data.max(), 0b01111111_11110000) # Highest possible + assert_equal(data[0, 0], 0) # Midpoint is 0 for >= 9-bit + assert_equal(data.min(), -0b10000000_00000000) # Lowest possible + + del data + + +def test_24_bit_odd_size_with_pad(): + # 24-bit, 3 B container, 3 channels, 5 samples, 45 B data chunk + # Should not raise any warnings about the data chunk pad byte + filename = 'test-8000Hz-le-3ch-5S-24bit.wav' + rate, data = wavfile.read(datafile(filename), mmap=False) + + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.int32)) + assert_equal(data.shape, (5, 3)) + + # All LSBytes should be 0 + assert_equal(data & 0xff, 0) + + # Hand-made max/min 
samples under different conventions: + # 2**(N-1) 2**(N-1)-1 LSB + assert_equal(data, [[-0x8000_0000, -0x7fff_ff00, -0x200], + [-0x4000_0000, -0x3fff_ff00, -0x100], + [+0x0000_0000, +0x0000_0000, +0x000], + [+0x4000_0000, +0x3fff_ff00, +0x100], + [+0x7fff_ff00, +0x7fff_ff00, +0x200]]) + # ^ clipped + + +def test_20_bit_extra_data(): + # 20-bit, 3 B container, 1 channel, 10 samples, 30 B data chunk + # with extra data filling container beyond the bit depth + filename = 'test-8000Hz-le-1ch-10S-20bit-extra.wav' + rate, data = wavfile.read(datafile(filename), mmap=False) + + assert_equal(rate, 1234) + assert_(np.issubdtype(data.dtype, np.int32)) + assert_equal(data.shape, (10,)) + + # All LSBytes should still be 0, because 3 B container in 4 B dtype + assert_equal(data & 0xff, 0) + + # But it should load the data beyond 20 bits + assert_((data & 0xf00).any()) + + # Full-scale positive/negative samples, then being halved each time + assert_equal(data, [+0x7ffff000, # +full-scale 20-bit + -0x7ffff000, # -full-scale 20-bit + +0x7ffff000 >> 1, # +1/2 + -0x7ffff000 >> 1, # -1/2 + +0x7ffff000 >> 2, # +1/4 + -0x7ffff000 >> 2, # -1/4 + +0x7ffff000 >> 3, # +1/8 + -0x7ffff000 >> 3, # -1/8 + +0x7ffff000 >> 4, # +1/16 + -0x7ffff000 >> 4, # -1/16 + ]) + + +def test_36_bit_odd_size(): + # 36-bit, 5 B container, 3 channels, 5 samples, 75 B data chunk + pad + filename = 'test-8000Hz-le-3ch-5S-36bit.wav' + rate, data = wavfile.read(datafile(filename), mmap=False) + + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.int64)) + assert_equal(data.shape, (5, 3)) + + # 28 LSBits should be 0 + assert_equal(data & 0xfffffff, 0) + + # Hand-made max/min samples under different conventions: + # Fixed-point 2**(N-1) Full-scale 2**(N-1)-1 LSB + correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_f000_0000, -0x2000_0000], + [-0x4000_0000_0000_0000, -0x3fff_ffff_f000_0000, -0x1000_0000], + [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x0000_0000], + [+0x4000_0000_0000_0000, +0x3fff_ffff_f000_0000, +0x1000_0000], + [+0x7fff_ffff_f000_0000, +0x7fff_ffff_f000_0000, +0x2000_0000]] + # ^ clipped + + assert_equal(data, correct) + + +def test_45_bit_even_size(): + # 45-bit, 6 B container, 3 channels, 5 samples, 90 B data chunk + filename = 'test-8000Hz-le-3ch-5S-45bit.wav' + rate, data = wavfile.read(datafile(filename), mmap=False) + + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.int64)) + assert_equal(data.shape, (5, 3)) + + # 19 LSBits should be 0 + assert_equal(data & 0x7ffff, 0) + + # Hand-made max/min samples under different conventions: + # Fixed-point 2**(N-1) Full-scale 2**(N-1)-1 LSB + correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_fff8_0000, -0x10_0000], + [-0x4000_0000_0000_0000, -0x3fff_ffff_fff8_0000, -0x08_0000], + [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x00_0000], + [+0x4000_0000_0000_0000, +0x3fff_ffff_fff8_0000, +0x08_0000], + [+0x7fff_ffff_fff8_0000, +0x7fff_ffff_fff8_0000, +0x10_0000]] + # ^ clipped + + assert_equal(data, correct) + + +def test_53_bit_odd_size(): + # 53-bit, 7 B container, 3 channels, 5 samples, 105 B data chunk + pad + filename = 'test-8000Hz-le-3ch-5S-53bit.wav' + rate, data = wavfile.read(datafile(filename), mmap=False) + + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.int64)) + assert_equal(data.shape, (5, 3)) + + # 11 LSBits should be 0 + assert_equal(data & 0x7ff, 0) + + # Hand-made max/min samples under different conventions: + # Fixed-point 2**(N-1) Full-scale 2**(N-1)-1 LSB + correct = [[-0x8000_0000_0000_0000, 
-0x7fff_ffff_ffff_f800, -0x1000], + [-0x4000_0000_0000_0000, -0x3fff_ffff_ffff_f800, -0x0800], + [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x0000], + [+0x4000_0000_0000_0000, +0x3fff_ffff_ffff_f800, +0x0800], + [+0x7fff_ffff_ffff_f800, +0x7fff_ffff_ffff_f800, +0x1000]] + # ^ clipped + + assert_equal(data, correct) + + +def test_64_bit_even_size(): + # 64-bit, 8 B container, 3 channels, 5 samples, 120 B data chunk + for mmap in [False, True]: + filename = 'test-8000Hz-le-3ch-5S-64bit.wav' + rate, data = wavfile.read(datafile(filename), mmap=mmap) + + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.int64)) + assert_equal(data.shape, (5, 3)) + + # Hand-made max/min samples under different conventions: + # Fixed-point 2**(N-1) Full-scale 2**(N-1)-1 LSB + correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_ffff_ffff, -0x2], + [-0x4000_0000_0000_0000, -0x3fff_ffff_ffff_ffff, -0x1], + [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x0], + [+0x4000_0000_0000_0000, +0x3fff_ffff_ffff_ffff, +0x1], + [+0x7fff_ffff_ffff_ffff, +0x7fff_ffff_ffff_ffff, +0x2]] + # ^ clipped + + assert_equal(data, correct) + + del data + + +def test_unsupported_mmap(): + # Test containers that cannot be mapped to numpy types + for filename in {'test-8000Hz-le-3ch-5S-24bit.wav', + 'test-8000Hz-le-3ch-5S-36bit.wav', + 'test-8000Hz-le-3ch-5S-45bit.wav', + 'test-8000Hz-le-3ch-5S-53bit.wav', + 'test-8000Hz-le-1ch-10S-20bit-extra.wav'}: + with raises(ValueError, match="mmap.*not compatible"): + rate, data = wavfile.read(datafile(filename), mmap=True) + + +def test_rifx(): + # Compare equivalent RIFX and RIFF files + for rifx, riff in {('test-44100Hz-be-1ch-4bytes.wav', + 'test-44100Hz-le-1ch-4bytes.wav'), + ('test-8000Hz-be-3ch-5S-24bit.wav', + 'test-8000Hz-le-3ch-5S-24bit.wav')}: + rate1, data1 = wavfile.read(datafile(rifx), mmap=False) + rate2, data2 = wavfile.read(datafile(riff), mmap=False) + assert_equal(rate1, rate2) + assert_equal(data1, data2) + + +def test_read_unknown_filetype_fail(): + # Not an RIFF + for mmap in [False, True]: + filename = 'example_1.nc' + with open(datafile(filename), 'rb') as fp: + with raises(ValueError, match="CDF.*'RIFF' and 'RIFX' supported"): + wavfile.read(fp, mmap=mmap) + + +def test_read_unknown_riff_form_type(): + # RIFF, but not WAVE form + for mmap in [False, True]: + filename = 'Transparent Busy.ani' + with open(datafile(filename), 'rb') as fp: + with raises(ValueError, match='Not a WAV file.*ACON'): + wavfile.read(fp, mmap=mmap) + + +def test_read_unknown_wave_format(): + # RIFF and WAVE, but not supported format + for mmap in [False, True]: + filename = 'test-8000Hz-le-1ch-1byte-ulaw.wav' + with open(datafile(filename), 'rb') as fp: + with raises(ValueError, match='Unknown wave file format.*MULAW.*' + 'Supported formats'): + wavfile.read(fp, mmap=mmap) + + +def test_read_early_eof_with_data(): + # File ends inside 'data' chunk, but we keep incomplete data + for mmap in [False, True]: + filename = 'test-44100Hz-le-1ch-4bytes-early-eof.wav' + with open(datafile(filename), 'rb') as fp: + with warns(wavfile.WavFileWarning, match='Reached EOF'): + rate, data = wavfile.read(fp, mmap=mmap) + assert data.size > 0 + assert rate == 44100 + # also test writing (gh-12176) + data[0] = 0 + + +def test_read_early_eof(): + # File ends after 'fact' chunk at boundary, no data read + for mmap in [False, True]: + filename = 'test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav' + with open(datafile(filename), 'rb') as fp: + with raises(ValueError, match="Unexpected end of file."): + 
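# EOF falls exactly on a chunk boundary here, so unlike the + # early-EOF-with-data case above there is no partial 'data' chunk + # to salvage; read() raises instead of warning +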
wavfile.read(fp, mmap=mmap) + + +def test_read_incomplete_chunk(): + # File ends inside 'fmt ' chunk ID, no data read + for mmap in [False, True]: + filename = 'test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav' + with open(datafile(filename), 'rb') as fp: + with raises(ValueError, match="Incomplete chunk ID.*b'f'"): + wavfile.read(fp, mmap=mmap) + + +def test_read_inconsistent_header(): + # File header's size fields contradict each other + for mmap in [False, True]: + filename = 'test-8000Hz-le-3ch-5S-24bit-inconsistent.wav' + with open(datafile(filename), 'rb') as fp: + with raises(ValueError, match="header is invalid"): + wavfile.read(fp, mmap=mmap) + + +# signed 8-bit integer PCM is not allowed +# unsigned > 8-bit integer PCM is not allowed +# 8- or 16-bit float PCM is not expected +# g and q are platform-dependent, so not included +@pytest.mark.parametrize("dt_str", ["<i2", "<i4", "<i8", "<f4", "<f8", ">i2", ">i4", ">i8", ">f4", ">f8", '|u1']) +@pytest.mark.parametrize("channels", [1, 2, 5]) +@pytest.mark.parametrize("rate", [8000, 32000]) +@pytest.mark.parametrize("mmap", [False, True]) +@pytest.mark.parametrize("realfile", [False, True]) +def test_write_roundtrip(realfile, mmap, rate, channels, dt_str, tmpdir): + dtype = np.dtype(dt_str) + if realfile: + tmpfile = str(tmpdir.join('temp.wav')) + else: + tmpfile = BytesIO() + data = np.random.rand(100, channels) + if channels == 1: + data = data[:, 0] + if dtype.kind == 'f': + # The range of the float type should be in [-1, 1] + data = data.astype(dtype) + else: + data = (data*128).astype(dtype) + + wavfile.write(tmpfile, rate, data) + + rate2, data2 = wavfile.read(tmpfile, mmap=mmap) + + assert_equal(rate, rate2) + assert_(data2.dtype.byteorder in ('<', '=', '|'), msg=data2.dtype) + assert_array_equal(data, data2) + # also test writing (gh-12176) + if realfile: + data2[0] = 0 + else: + with pytest.raises(ValueError, match='read-only'): + data2[0] = 0 + + if realfile and mmap and IS_PYPY and sys.platform == 'win32': + # windows cannot remove a dead file held by a mmap but not collected + # in PyPy; since the filename gets reused in this test, clean this up + break_cycles() + break_cycles() + + +@pytest.mark.parametrize("dtype", [np.float16]) +def test_wavfile_dtype_unsupported(tmpdir, dtype): + tmpfile = str(tmpdir.join('temp.wav')) + rng = np.random.default_rng(1234) + data = rng.random((100, 5)).astype(dtype) + rate = 8000 + with pytest.raises(ValueError, match="Unsupported"): + wavfile.write(tmpfile, rate, data) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_cobyla_py.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_cobyla_py.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea2e3126766a2e1b9160981e350dad320754e9c8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_cobyla_py.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentiate.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentiate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75c03bae833615916866ef5cf79ef2a1dc4e2e04 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentiate.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_hessian_update_strategy.cpython-310.pyc
b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_hessian_update_strategy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..737df7db46e3e9bcfd0f7f374b62a99c0749bd5a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_hessian_update_strategy.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_rs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_rs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba740e57748b41200b62951129c5f19db8140ec0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_rs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_root.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_root.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a57e6c3cc7321a22d75b34638209e6fbeb54e2a7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_root.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_root_scalar.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_root_scalar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39004bdf2c9767bdce80ea0c67aa8824b68dea57 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_root_scalar.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_dogleg.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_dogleg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0d1f5d2772cbd868c5648daf9395d2b1714f093 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_dogleg.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/cobyla.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/cobyla.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5d7db72b882413b71326290c3a23e2dfff8247a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/cobyla.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/tnc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/tnc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1ec17bf8ffe9ce766fac9f601354010279a132f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/tnc.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/zeros.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/zeros.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb4d5ac61548433d1e9e712067ea74329231cb99 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/optimize/__pycache__/zeros.cpython-310.pyc differ