applied-ai-018 committed
Commit 0919000 · verified · 1 parent: 08183e2

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step40/zero/11.input_layernorm.weight/fp32.pt +3 -0
  2. ckpts/universal/global_step40/zero/5.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step40/zero/5.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  4. venv/lib/python3.10/site-packages/scipy/cluster/__init__.py +31 -0
  5. venv/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so +0 -0
  6. venv/lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so +0 -0
  7. venv/lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so +0 -0
  8. venv/lib/python3.10/site-packages/scipy/cluster/hierarchy.py +0 -0
  9. venv/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py +145 -0
  10. venv/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py +1225 -0
  11. venv/lib/python3.10/site-packages/scipy/cluster/vq.py +835 -0
  12. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/__init__.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_bsr.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_construct.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_coo.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csc.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dok.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_sputils.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/bsr.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/compressed.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/construct.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/coo.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/csr.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/data.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/dia.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/dok.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/lil.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/sputils.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__init__.py +71 -0
  46. venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/linsolve.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_add_newdocs.py +153 -0
  50. venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cpython-310-x86_64-linux-gnu.so +0 -0
ckpts/universal/global_step40/zero/11.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f744b2dee30f8590d57d433571c451f71516cfc0d8dda4aeb56cdb49f8ab3e69
+ size 9293
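The checkpoint files in this commit are Git LFS pointer stubs like the one above: three text lines giving the pointer spec version, the sha256 object id of the payload, and its size in bytes; the tensor data itself lives in LFS storage. As a minimal illustrative sketch (not part of this commit), such a pointer can be parsed with a few lines of Python:

import pathlib

def parse_lfs_pointer(path):
    """Parse a Git LFS pointer file into a dict of its key/value fields."""
    fields = {}
    for line in pathlib.Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key.strip()] = value.strip()
    return fields

# For the fp32.pt pointer above this would yield:
# {'version': 'https://git-lfs.github.com/spec/v1',
#  'oid': 'sha256:f744b2de...', 'size': '9293'}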
ckpts/universal/global_step40/zero/5.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a33f07f55c8a1ab36bf4ff987be5711b50931635a3de9d5197d6759952d70b87
+ size 33555612
ckpts/universal/global_step40/zero/5.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3cba805c3aad0e844d7f976deb1110bd57596a4c1fbc043a118bcb534d97dea9
+ size 33555627
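Continuing the sketch above (again illustrative, not part of the commit), a downloaded payload can be checked against the oid and size recorded in its pointer:

import hashlib
import os

def verify_lfs_object(fields, payload_path):
    """Check a downloaded payload against a parsed pointer's oid and size."""
    digest = hashlib.sha256(open(payload_path, "rb").read()).hexdigest()
    return (fields["oid"] == "sha256:" + digest
            and int(fields["size"]) == os.path.getsize(payload_path))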
venv/lib/python3.10/site-packages/scipy/cluster/__init__.py ADDED
@@ -0,0 +1,31 @@
+ """
+ =========================================
+ Clustering package (:mod:`scipy.cluster`)
+ =========================================
+
+ .. currentmodule:: scipy.cluster
+
+ .. toctree::
+    :hidden:
+
+    cluster.vq
+    cluster.hierarchy
+
+ Clustering algorithms are useful in information theory, target detection,
+ communications, compression, and other areas. The `vq` module only
+ supports vector quantization and the k-means algorithms.
+
+ The `hierarchy` module provides functions for hierarchical and
+ agglomerative clustering. Its features include generating hierarchical
+ clusters from distance matrices,
+ calculating statistics on clusters, cutting linkages
+ to generate flat clusters, and visualizing clusters with dendrograms.
+
+ """
+ __all__ = ['vq', 'hierarchy']
+
+ from . import vq, hierarchy
+
+ from scipy._lib._testutils import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
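As the docstring above says, `vq` handles vector quantization and k-means while `hierarchy` builds agglomerative clusterings from condensed distance matrices. A short usage sketch of both modules (illustrative only; the toy data is made up):

import numpy as np
from scipy.cluster.vq import whiten, kmeans, vq
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import pdist

rng = np.random.default_rng(0)
X = rng.standard_normal((30, 2))     # toy observation matrix

# vq: normalize features, fit 3 centroids, assign observations to them
codebook, _ = kmeans(whiten(X), 3)
labels, _ = vq(whiten(X), codebook)

# hierarchy: condensed distance matrix -> linkage matrix -> flat clusters
Z = linkage(pdist(X), method='single')
flat = fcluster(Z, t=3, criterion='maxclust')

Because the module also wires up a `PytestTester` (the `test` attribute above), `scipy.cluster.test()` runs the package's vendored test suite via pytest.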
venv/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (423 kB).
 
venv/lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (356 kB).
 
venv/lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (128 kB).
 
venv/lib/python3.10/site-packages/scipy/cluster/hierarchy.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py ADDED
@@ -0,0 +1,145 @@
+ from numpy import array
+
+
+ Q_X = array([[5.26563660e-01, 3.14160190e-01, 8.00656370e-02],
+              [7.50205180e-01, 4.60299830e-01, 8.98696460e-01],
+              [6.65461230e-01, 6.94011420e-01, 9.10465700e-01],
+              [9.64047590e-01, 1.43082200e-03, 7.39874220e-01],
+              [1.08159060e-01, 5.53028790e-01, 6.63804780e-02],
+              [9.31359130e-01, 8.25424910e-01, 9.52315440e-01],
+              [6.78086960e-01, 3.41903970e-01, 5.61481950e-01],
+              [9.82730940e-01, 7.04605210e-01, 8.70978630e-02],
+              [6.14691610e-01, 4.69989230e-02, 6.02406450e-01],
+              [5.80161260e-01, 9.17354970e-01, 5.88163850e-01],
+              [1.38246310e+00, 1.96358160e+00, 1.94437880e+00],
+              [2.10675860e+00, 1.67148730e+00, 1.34854480e+00],
+              [1.39880070e+00, 1.66142050e+00, 1.32224550e+00],
+              [1.71410460e+00, 1.49176380e+00, 1.45432170e+00],
+              [1.54102340e+00, 1.84374950e+00, 1.64658950e+00],
+              [2.08512480e+00, 1.84524350e+00, 2.17340850e+00],
+              [1.30748740e+00, 1.53801650e+00, 2.16007740e+00],
+              [1.41447700e+00, 1.99329070e+00, 1.99107420e+00],
+              [1.61943490e+00, 1.47703280e+00, 1.89788160e+00],
+              [1.59880600e+00, 1.54988980e+00, 1.57563350e+00],
+              [3.37247380e+00, 2.69635310e+00, 3.39981700e+00],
+              [3.13705120e+00, 3.36528090e+00, 3.06089070e+00],
+              [3.29413250e+00, 3.19619500e+00, 2.90700170e+00],
+              [2.65510510e+00, 3.06785900e+00, 2.97198540e+00],
+              [3.30941040e+00, 2.59283970e+00, 2.57714110e+00],
+              [2.59557220e+00, 3.33477370e+00, 3.08793190e+00],
+              [2.58206180e+00, 3.41615670e+00, 3.26441990e+00],
+              [2.71127000e+00, 2.77032450e+00, 2.63466500e+00],
+              [2.79617850e+00, 3.25473720e+00, 3.41801560e+00],
+              [2.64741750e+00, 2.54538040e+00, 3.25354110e+00]])
+
+ ytdist = array([662., 877., 255., 412., 996., 295., 468., 268., 400., 754.,
+                 564., 138., 219., 869., 669.])
+
+ linkage_ytdist_single = array([[2., 5., 138., 2.],
+                                [3., 4., 219., 2.],
+                                [0., 7., 255., 3.],
+                                [1., 8., 268., 4.],
+                                [6., 9., 295., 6.]])
+
+ linkage_ytdist_complete = array([[2., 5., 138., 2.],
+                                  [3., 4., 219., 2.],
+                                  [1., 6., 400., 3.],
+                                  [0., 7., 412., 3.],
+                                  [8., 9., 996., 6.]])
+
+ linkage_ytdist_average = array([[2., 5., 138., 2.],
+                                 [3., 4., 219., 2.],
+                                 [0., 7., 333.5, 3.],
+                                 [1., 6., 347.5, 3.],
+                                 [8., 9., 680.77777778, 6.]])
+
+ linkage_ytdist_weighted = array([[2., 5., 138., 2.],
+                                  [3., 4., 219., 2.],
+                                  [0., 7., 333.5, 3.],
+                                  [1., 6., 347.5, 3.],
+                                  [8., 9., 670.125, 6.]])
+
+ # the optimal leaf ordering of linkage_ytdist_single
+ linkage_ytdist_single_olo = array([[5., 2., 138., 2.],
+                                    [4., 3., 219., 2.],
+                                    [7., 0., 255., 3.],
+                                    [1., 8., 268., 4.],
+                                    [6., 9., 295., 6.]])
+
+ X = array([[1.43054825, -7.5693489],
+            [6.95887839, 6.82293382],
+            [2.87137846, -9.68248579],
+            [7.87974764, -6.05485803],
+            [8.24018364, -6.09495602],
+            [7.39020262, 8.54004355]])
+
+ linkage_X_centroid = array([[3., 4., 0.36265956, 2.],
+                             [1., 5., 1.77045373, 2.],
+                             [0., 2., 2.55760419, 2.],
+                             [6., 8., 6.43614494, 4.],
+                             [7., 9., 15.17363237, 6.]])
+
+ linkage_X_median = array([[3., 4., 0.36265956, 2.],
+                           [1., 5., 1.77045373, 2.],
+                           [0., 2., 2.55760419, 2.],
+                           [6., 8., 6.43614494, 4.],
+                           [7., 9., 15.17363237, 6.]])
+
+ linkage_X_ward = array([[3., 4., 0.36265956, 2.],
+                         [1., 5., 1.77045373, 2.],
+                         [0., 2., 2.55760419, 2.],
+                         [6., 8., 9.10208346, 4.],
+                         [7., 9., 24.7784379, 6.]])
+
+ # the optimal leaf ordering of linkage_X_ward
+ linkage_X_ward_olo = array([[4., 3., 0.36265956, 2.],
+                             [5., 1., 1.77045373, 2.],
+                             [2., 0., 2.55760419, 2.],
+                             [6., 8., 9.10208346, 4.],
+                             [7., 9., 24.7784379, 6.]])
+
+ inconsistent_ytdist = {
+     1: array([[138., 0., 1., 0.],
+               [219., 0., 1., 0.],
+               [255., 0., 1., 0.],
+               [268., 0., 1., 0.],
+               [295., 0., 1., 0.]]),
+     2: array([[138., 0., 1., 0.],
+               [219., 0., 1., 0.],
+               [237., 25.45584412, 2., 0.70710678],
+               [261.5, 9.19238816, 2., 0.70710678],
+               [233.66666667, 83.9424406, 3., 0.7306594]]),
+     3: array([[138., 0., 1., 0.],
+               [219., 0., 1., 0.],
+               [237., 25.45584412, 2., 0.70710678],
+               [247.33333333, 25.38372182, 3., 0.81417007],
+               [239., 69.36377537, 4., 0.80733783]]),
+     4: array([[138., 0., 1., 0.],
+               [219., 0., 1., 0.],
+               [237., 25.45584412, 2., 0.70710678],
+               [247.33333333, 25.38372182, 3., 0.81417007],
+               [235., 60.73302232, 5., 0.98793042]])}
+
+ fcluster_inconsistent = {
+     0.8: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1,
+                 1, 1, 1, 1, 1, 1, 1, 1, 1]),
+     1.0: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1,
+                 1, 1, 1, 1, 1, 1, 1, 1, 1]),
+     2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                 1, 1, 1, 1, 1, 1, 1, 1, 1])}
+
+ fcluster_distance = {
+     0.6: array([4, 4, 4, 4, 4, 4, 4, 5, 4, 4, 6, 6, 6, 6, 6, 7, 6, 6, 6, 6, 3,
+                 1, 1, 1, 2, 1, 1, 1, 1, 1]),
+     1.0: array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1,
+                 1, 1, 1, 1, 1, 1, 1, 1, 1]),
+     2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                 1, 1, 1, 1, 1, 1, 1, 1, 1])}
+
+ fcluster_maxclust = {
+     8.0: array([5, 5, 5, 5, 5, 5, 5, 6, 5, 5, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 4,
+                 1, 1, 1, 3, 1, 1, 1, 1, 2]),
+     4.0: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2,
+                 1, 1, 1, 1, 1, 1, 1, 1, 1]),
+     1.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                 1, 1, 1, 1, 1, 1, 1, 1, 1])}
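These fixtures pair inputs (`ytdist`, `X`, `Q_X`) with precomputed expected results (`linkage_*`, `inconsistent_ytdist`, `fcluster_*`). A sketch of how the test suite added below consumes them, paraphrasing its `check_linkage_tdist` helper:

from numpy.testing import assert_allclose
from scipy.cluster.hierarchy import linkage
import hierarchy_test_data  # imported relatively by the actual test module

# single linkage on the condensed distance matrix ytdist should reproduce
# the precomputed linkage matrix stored next to it
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_allclose(Z, hierarchy_test_data.linkage_ytdist_single, atol=1e-10)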
venv/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py ADDED
@@ -0,0 +1,1225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Author: Damian Eads
3
+ # Date: April 17, 2008
4
+ #
5
+ # Copyright (C) 2008 Damian Eads
6
+ #
7
+ # Redistribution and use in source and binary forms, with or without
8
+ # modification, are permitted provided that the following conditions
9
+ # are met:
10
+ #
11
+ # 1. Redistributions of source code must retain the above copyright
12
+ # notice, this list of conditions and the following disclaimer.
13
+ #
14
+ # 2. Redistributions in binary form must reproduce the above
15
+ # copyright notice, this list of conditions and the following
16
+ # disclaimer in the documentation and/or other materials provided
17
+ # with the distribution.
18
+ #
19
+ # 3. The name of the author may not be used to endorse or promote
20
+ # products derived from this software without specific prior
21
+ # written permission.
22
+ #
23
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
24
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
27
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
29
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34
+ import numpy as np
35
+ from numpy.testing import assert_allclose, assert_equal, assert_, assert_warns
36
+ import pytest
37
+ from pytest import raises as assert_raises
38
+
39
+ import scipy.cluster.hierarchy
40
+ from scipy.cluster.hierarchy import (
41
+ ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage,
42
+ num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster,
43
+ is_isomorphic, single, leaders,
44
+ correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
45
+ is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
46
+ set_link_color_palette, cut_tree, optimal_leaf_ordering,
47
+ _order_cluster_tree, _hierarchy, _LINKAGE_METHODS)
48
+ from scipy.spatial.distance import pdist
49
+ from scipy.cluster._hierarchy import Heap
50
+ from scipy.conftest import array_api_compatible
51
+ from scipy._lib._array_api import xp_assert_close, xp_assert_equal
52
+
53
+ from . import hierarchy_test_data
54
+
55
+
56
+ # Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
57
+ # check if it's available
58
+ try:
59
+ import matplotlib
60
+ # and set the backend to be Agg (no gui)
61
+ matplotlib.use('Agg')
62
+ # before importing pyplot
63
+ import matplotlib.pyplot as plt
64
+ have_matplotlib = True
65
+ except Exception:
66
+ have_matplotlib = False
67
+
68
+
69
+ pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_if_array_api")]
70
+ skip_if_array_api = pytest.mark.skip_if_array_api
71
+
72
+
73
+ class TestLinkage:
74
+
75
+ @skip_if_array_api(cpu_only=True)
76
+ def test_linkage_non_finite_elements_in_distance_matrix(self, xp):
77
+ # Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf).
78
+ # Exception expected.
79
+ y = xp.zeros((6,))
80
+ y[0] = xp.nan
81
+ assert_raises(ValueError, linkage, y)
82
+
83
+ @skip_if_array_api(cpu_only=True)
84
+ def test_linkage_empty_distance_matrix(self, xp):
85
+ # Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
86
+ y = xp.zeros((0,))
87
+ assert_raises(ValueError, linkage, y)
88
+
89
+ @skip_if_array_api(cpu_only=True)
90
+ def test_linkage_tdist(self, xp):
91
+ for method in ['single', 'complete', 'average', 'weighted']:
92
+ self.check_linkage_tdist(method, xp)
93
+
94
+ def check_linkage_tdist(self, method, xp):
95
+ # Tests linkage(Y, method) on the tdist data set.
96
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), method)
97
+ expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
98
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10)
99
+
100
+ @skip_if_array_api(cpu_only=True)
101
+ def test_linkage_X(self, xp):
102
+ for method in ['centroid', 'median', 'ward']:
103
+ self.check_linkage_q(method, xp)
104
+
105
+ def check_linkage_q(self, method, xp):
106
+ # Tests linkage(Y, method) on the Q data set.
107
+ Z = linkage(xp.asarray(hierarchy_test_data.X), method)
108
+ expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
109
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06)
110
+
111
+ y = scipy.spatial.distance.pdist(hierarchy_test_data.X,
112
+ metric="euclidean")
113
+ Z = linkage(xp.asarray(y), method)
114
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06)
115
+
116
+ @skip_if_array_api(cpu_only=True)
117
+ def test_compare_with_trivial(self, xp):
118
+ rng = np.random.RandomState(0)
119
+ n = 20
120
+ X = rng.rand(n, 2)
121
+ d = pdist(X)
122
+
123
+ for method, code in _LINKAGE_METHODS.items():
124
+ Z_trivial = _hierarchy.linkage(d, n, code)
125
+ Z = linkage(xp.asarray(d), method)
126
+ xp_assert_close(Z, xp.asarray(Z_trivial), rtol=1e-14, atol=1e-15)
127
+
128
+ @skip_if_array_api(cpu_only=True)
129
+ def test_optimal_leaf_ordering(self, xp):
130
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), optimal_ordering=True)
131
+ expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo')
132
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10)
133
+
134
+
135
+ @skip_if_array_api(cpu_only=True)
136
+ class TestLinkageTies:
137
+
138
+ _expectations = {
139
+ 'single': np.array([[0, 1, 1.41421356, 2],
140
+ [2, 3, 1.41421356, 3]]),
141
+ 'complete': np.array([[0, 1, 1.41421356, 2],
142
+ [2, 3, 2.82842712, 3]]),
143
+ 'average': np.array([[0, 1, 1.41421356, 2],
144
+ [2, 3, 2.12132034, 3]]),
145
+ 'weighted': np.array([[0, 1, 1.41421356, 2],
146
+ [2, 3, 2.12132034, 3]]),
147
+ 'centroid': np.array([[0, 1, 1.41421356, 2],
148
+ [2, 3, 2.12132034, 3]]),
149
+ 'median': np.array([[0, 1, 1.41421356, 2],
150
+ [2, 3, 2.12132034, 3]]),
151
+ 'ward': np.array([[0, 1, 1.41421356, 2],
152
+ [2, 3, 2.44948974, 3]]),
153
+ }
154
+
155
+ def test_linkage_ties(self, xp):
156
+ for method in ['single', 'complete', 'average', 'weighted',
157
+ 'centroid', 'median', 'ward']:
158
+ self.check_linkage_ties(method, xp)
159
+
160
+ def check_linkage_ties(self, method, xp):
161
+ X = xp.asarray([[-1, -1], [0, 0], [1, 1]])
162
+ Z = linkage(X, method=method)
163
+ expectedZ = self._expectations[method]
164
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06)
165
+
166
+
167
+ @skip_if_array_api(cpu_only=True)
168
+ class TestInconsistent:
169
+
170
+ def test_inconsistent_tdist(self, xp):
171
+ for depth in hierarchy_test_data.inconsistent_ytdist:
172
+ self.check_inconsistent_tdist(depth, xp)
173
+
174
+ def check_inconsistent_tdist(self, depth, xp):
175
+ Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single)
176
+ xp_assert_close(inconsistent(Z, depth),
177
+ xp.asarray(hierarchy_test_data.inconsistent_ytdist[depth]))
178
+
179
+
180
+ @skip_if_array_api(cpu_only=True)
181
+ class TestCopheneticDistance:
182
+
183
+ def test_linkage_cophenet_tdist_Z(self, xp):
184
+ # Tests cophenet(Z) on tdist data set.
185
+ expectedM = xp.asarray([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
186
+ 295, 138, 219, 295, 295])
187
+ Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single)
188
+ M = cophenet(Z)
189
+ xp_assert_close(M, xp.asarray(expectedM, dtype=xp.float64), atol=1e-10)
190
+
191
+ def test_linkage_cophenet_tdist_Z_Y(self, xp):
192
+ # Tests cophenet(Z, Y) on tdist data set.
193
+ Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single)
194
+ (c, M) = cophenet(Z, xp.asarray(hierarchy_test_data.ytdist))
195
+ expectedM = xp.asarray([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
196
+ 295, 138, 219, 295, 295], dtype=xp.float64)
197
+ expectedc = xp.asarray(0.639931296433393415057366837573, dtype=xp.float64)[()]
198
+ xp_assert_close(c, expectedc, atol=1e-10)
199
+ xp_assert_close(M, expectedM, atol=1e-10)
200
+
201
+
202
+ class TestMLabLinkageConversion:
203
+
204
+ def test_mlab_linkage_conversion_empty(self, xp):
205
+ # Tests from/to_mlab_linkage on empty linkage array.
206
+ X = xp.asarray([], dtype=xp.float64)
207
+ xp_assert_equal(from_mlab_linkage(X), X)
208
+ xp_assert_equal(to_mlab_linkage(X), X)
209
+
210
+ @skip_if_array_api(cpu_only=True)
211
+ def test_mlab_linkage_conversion_single_row(self, xp):
212
+ # Tests from/to_mlab_linkage on linkage array with single row.
213
+ Z = xp.asarray([[0., 1., 3., 2.]])
214
+ Zm = xp.asarray([[1, 2, 3]])
215
+ xp_assert_close(from_mlab_linkage(Zm), xp.asarray(Z, dtype=xp.float64),
216
+ rtol=1e-15)
217
+ xp_assert_close(to_mlab_linkage(Z), xp.asarray(Zm, dtype=xp.float64),
218
+ rtol=1e-15)
219
+
220
+ @skip_if_array_api(cpu_only=True)
221
+ def test_mlab_linkage_conversion_multiple_rows(self, xp):
222
+ # Tests from/to_mlab_linkage on linkage array with multiple rows.
223
+ Zm = xp.asarray([[3, 6, 138], [4, 5, 219],
224
+ [1, 8, 255], [2, 9, 268], [7, 10, 295]])
225
+ Z = xp.asarray([[2., 5., 138., 2.],
226
+ [3., 4., 219., 2.],
227
+ [0., 7., 255., 3.],
228
+ [1., 8., 268., 4.],
229
+ [6., 9., 295., 6.]],
230
+ dtype=xp.float64)
231
+ xp_assert_close(from_mlab_linkage(Zm), Z, rtol=1e-15)
232
+ xp_assert_close(to_mlab_linkage(Z), xp.asarray(Zm, dtype=xp.float64),
233
+ rtol=1e-15)
234
+
235
+
236
+ @skip_if_array_api(cpu_only=True)
237
+ class TestFcluster:
238
+
239
+ def test_fclusterdata(self, xp):
240
+ for t in hierarchy_test_data.fcluster_inconsistent:
241
+ self.check_fclusterdata(t, 'inconsistent', xp)
242
+ for t in hierarchy_test_data.fcluster_distance:
243
+ self.check_fclusterdata(t, 'distance', xp)
244
+ for t in hierarchy_test_data.fcluster_maxclust:
245
+ self.check_fclusterdata(t, 'maxclust', xp)
246
+
247
+ def check_fclusterdata(self, t, criterion, xp):
248
+ # Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set
249
+ expectedT = xp.asarray(getattr(hierarchy_test_data, 'fcluster_' + criterion)[t])
250
+ X = xp.asarray(hierarchy_test_data.Q_X)
251
+ T = fclusterdata(X, criterion=criterion, t=t)
252
+ assert_(is_isomorphic(T, expectedT))
253
+
254
+ def test_fcluster(self, xp):
255
+ for t in hierarchy_test_data.fcluster_inconsistent:
256
+ self.check_fcluster(t, 'inconsistent', xp)
257
+ for t in hierarchy_test_data.fcluster_distance:
258
+ self.check_fcluster(t, 'distance', xp)
259
+ for t in hierarchy_test_data.fcluster_maxclust:
260
+ self.check_fcluster(t, 'maxclust', xp)
261
+
262
+ def check_fcluster(self, t, criterion, xp):
263
+ # Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
264
+ expectedT = xp.asarray(getattr(hierarchy_test_data, 'fcluster_' + criterion)[t])
265
+ Z = single(xp.asarray(hierarchy_test_data.Q_X))
266
+ T = fcluster(Z, criterion=criterion, t=t)
267
+ assert_(is_isomorphic(T, expectedT))
268
+
269
+ def test_fcluster_monocrit(self, xp):
270
+ for t in hierarchy_test_data.fcluster_distance:
271
+ self.check_fcluster_monocrit(t, xp)
272
+ for t in hierarchy_test_data.fcluster_maxclust:
273
+ self.check_fcluster_maxclust_monocrit(t, xp)
274
+
275
+ def check_fcluster_monocrit(self, t, xp):
276
+ expectedT = xp.asarray(hierarchy_test_data.fcluster_distance[t])
277
+ Z = single(xp.asarray(hierarchy_test_data.Q_X))
278
+ T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
279
+ assert_(is_isomorphic(T, expectedT))
280
+
281
+ def check_fcluster_maxclust_monocrit(self, t, xp):
282
+ expectedT = xp.asarray(hierarchy_test_data.fcluster_maxclust[t])
283
+ Z = single(xp.asarray(hierarchy_test_data.Q_X))
284
+ T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
285
+ assert_(is_isomorphic(T, expectedT))
286
+
287
+
288
+ @skip_if_array_api(cpu_only=True)
289
+ class TestLeaders:
290
+
291
+ def test_leaders_single(self, xp):
292
+ # Tests leaders using a flat clustering generated by single linkage.
293
+ X = hierarchy_test_data.Q_X
294
+ Y = pdist(X)
295
+ Y = xp.asarray(Y)
296
+ Z = linkage(Y)
297
+ T = fcluster(Z, criterion='maxclust', t=3)
298
+ Lright = (xp.asarray([53, 55, 56]), xp.asarray([2, 3, 1]))
299
+ T = xp.asarray(T, dtype=xp.int32)
300
+ L = leaders(Z, T)
301
+ assert_allclose(np.concatenate(L), np.concatenate(Lright), rtol=1e-15)
302
+
303
+
304
+ @skip_if_array_api(np_only=True,
305
+ reasons=['`is_isomorphic` only supports NumPy backend'])
306
+ class TestIsIsomorphic:
307
+
308
+ @skip_if_array_api(np_only=True,
309
+ reasons=['array-likes only supported for NumPy backend'])
310
+ def test_array_like(self, xp):
311
+ assert is_isomorphic([1, 1, 1], [2, 2, 2])
312
+ assert is_isomorphic([], [])
313
+
314
+ def test_is_isomorphic_1(self, xp):
315
+ # Tests is_isomorphic on test case #1 (one flat cluster, different labellings)
316
+ a = xp.asarray([1, 1, 1])
317
+ b = xp.asarray([2, 2, 2])
318
+ assert is_isomorphic(a, b)
319
+ assert is_isomorphic(b, a)
320
+
321
+ def test_is_isomorphic_2(self, xp):
322
+ # Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
323
+ a = xp.asarray([1, 7, 1])
324
+ b = xp.asarray([2, 3, 2])
325
+ assert is_isomorphic(a, b)
326
+ assert is_isomorphic(b, a)
327
+
328
+ def test_is_isomorphic_3(self, xp):
329
+ # Tests is_isomorphic on test case #3 (no flat clusters)
330
+ a = xp.asarray([])
331
+ b = xp.asarray([])
332
+ assert is_isomorphic(a, b)
333
+
334
+ def test_is_isomorphic_4A(self, xp):
335
+ # Tests is_isomorphic on test case #4A
336
+ # (3 flat clusters, different labelings, isomorphic)
337
+ a = xp.asarray([1, 2, 3])
338
+ b = xp.asarray([1, 3, 2])
339
+ assert is_isomorphic(a, b)
340
+ assert is_isomorphic(b, a)
341
+
342
+ def test_is_isomorphic_4B(self, xp):
343
+ # Tests is_isomorphic on test case #4B
344
+ # (3 flat clusters, different labelings, nonisomorphic)
345
+ a = xp.asarray([1, 2, 3, 3])
346
+ b = xp.asarray([1, 3, 2, 3])
347
+ assert is_isomorphic(a, b) is False
348
+ assert is_isomorphic(b, a) is False
349
+
350
+ def test_is_isomorphic_4C(self, xp):
351
+ # Tests is_isomorphic on test case #4C
352
+ # (3 flat clusters, different labelings, isomorphic)
353
+ a = xp.asarray([7, 2, 3])
354
+ b = xp.asarray([6, 3, 2])
355
+ assert is_isomorphic(a, b)
356
+ assert is_isomorphic(b, a)
357
+
358
+ def test_is_isomorphic_5(self, xp):
359
+ # Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
360
+ # clusters, random permutation of the labeling).
361
+ for nc in [2, 3, 5]:
362
+ self.help_is_isomorphic_randperm(1000, nc, xp=xp)
363
+
364
+ def test_is_isomorphic_6(self, xp):
365
+ # Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
366
+ # clusters, random permutation of the labeling, slightly
367
+ # nonisomorphic.)
368
+ for nc in [2, 3, 5]:
369
+ self.help_is_isomorphic_randperm(1000, nc, True, 5, xp=xp)
370
+
371
+ def test_is_isomorphic_7(self, xp):
372
+ # Regression test for gh-6271
373
+ a = xp.asarray([1, 2, 3])
374
+ b = xp.asarray([1, 1, 1])
375
+ assert not is_isomorphic(a, b)
376
+
377
+ def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0,
378
+ *, xp):
379
+ for k in range(3):
380
+ a = (np.random.rand(nobs) * nclusters).astype(int)
381
+ b = np.zeros(a.size, dtype=int)
382
+ P = np.random.permutation(nclusters)
383
+ for i in range(0, a.shape[0]):
384
+ b[i] = P[a[i]]
385
+ if noniso:
386
+ Q = np.random.permutation(nobs)
387
+ b[Q[0:nerrors]] += 1
388
+ b[Q[0:nerrors]] %= nclusters
389
+ a = xp.asarray(a)
390
+ b = xp.asarray(b)
391
+ assert is_isomorphic(a, b) == (not noniso)
392
+ assert is_isomorphic(b, a) == (not noniso)
393
+
394
+
395
+ @skip_if_array_api(cpu_only=True)
396
+ class TestIsValidLinkage:
397
+
398
+ def test_is_valid_linkage_various_size(self, xp):
399
+ for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
400
+ (1, 4, True), (2, 4, True)]:
401
+ self.check_is_valid_linkage_various_size(nrow, ncol, valid, xp)
402
+
403
+ def check_is_valid_linkage_various_size(self, nrow, ncol, valid, xp):
404
+ # Tests is_valid_linkage(Z) with linkage matrices of various sizes
405
+ Z = xp.asarray([[0, 1, 3.0, 2, 5],
406
+ [3, 2, 4.0, 3, 3]], dtype=xp.float64)
407
+ Z = Z[:nrow, :ncol]
408
+ assert_(is_valid_linkage(Z) == valid)
409
+ if not valid:
410
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
411
+
412
+ def test_is_valid_linkage_int_type(self, xp):
413
+ # Tests is_valid_linkage(Z) with integer type.
414
+ Z = xp.asarray([[0, 1, 3.0, 2],
415
+ [3, 2, 4.0, 3]], dtype=xp.int64)
416
+ assert_(is_valid_linkage(Z) is False)
417
+ assert_raises(TypeError, is_valid_linkage, Z, throw=True)
418
+
419
+ def test_is_valid_linkage_empty(self, xp):
420
+ # Tests is_valid_linkage(Z) with empty linkage.
421
+ Z = xp.zeros((0, 4), dtype=xp.float64)
422
+ assert_(is_valid_linkage(Z) is False)
423
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
424
+
425
+ def test_is_valid_linkage_4_and_up(self, xp):
426
+ # Tests is_valid_linkage(Z) on linkage on observation sets between
427
+ # sizes 4 and 15 (step size 3).
428
+ for i in range(4, 15, 3):
429
+ y = np.random.rand(i*(i-1)//2)
430
+ y = xp.asarray(y)
431
+ Z = linkage(y)
432
+ assert_(is_valid_linkage(Z) is True)
433
+
434
+ def test_is_valid_linkage_4_and_up_neg_index_left(self, xp):
435
+ # Tests is_valid_linkage(Z) on linkage on observation sets between
436
+ # sizes 4 and 15 (step size 3) with negative indices (left).
437
+ for i in range(4, 15, 3):
438
+ y = np.random.rand(i*(i-1)//2)
439
+ y = xp.asarray(y)
440
+ Z = linkage(y)
441
+ Z[i//2,0] = -2
442
+ assert_(is_valid_linkage(Z) is False)
443
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
444
+
445
+ def test_is_valid_linkage_4_and_up_neg_index_right(self, xp):
446
+ # Tests is_valid_linkage(Z) on linkage on observation sets between
447
+ # sizes 4 and 15 (step size 3) with negative indices (right).
448
+ for i in range(4, 15, 3):
449
+ y = np.random.rand(i*(i-1)//2)
450
+ y = xp.asarray(y)
451
+ Z = linkage(y)
452
+ Z[i//2,1] = -2
453
+ assert_(is_valid_linkage(Z) is False)
454
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
455
+
456
+ def test_is_valid_linkage_4_and_up_neg_dist(self, xp):
457
+ # Tests is_valid_linkage(Z) on linkage on observation sets between
458
+ # sizes 4 and 15 (step size 3) with negative distances.
459
+ for i in range(4, 15, 3):
460
+ y = np.random.rand(i*(i-1)//2)
461
+ y = xp.asarray(y)
462
+ Z = linkage(y)
463
+ Z[i//2,2] = -0.5
464
+ assert_(is_valid_linkage(Z) is False)
465
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
466
+
467
+ def test_is_valid_linkage_4_and_up_neg_counts(self, xp):
468
+ # Tests is_valid_linkage(Z) on linkage on observation sets between
469
+ # sizes 4 and 15 (step size 3) with negative counts.
470
+ for i in range(4, 15, 3):
471
+ y = np.random.rand(i*(i-1)//2)
472
+ y = xp.asarray(y)
473
+ Z = linkage(y)
474
+ Z[i//2,3] = -2
475
+ assert_(is_valid_linkage(Z) is False)
476
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
477
+
478
+
479
+ @skip_if_array_api(cpu_only=True)
480
+ class TestIsValidInconsistent:
481
+
482
+ def test_is_valid_im_int_type(self, xp):
483
+ # Tests is_valid_im(R) with integer type.
484
+ R = xp.asarray([[0, 1, 3.0, 2],
485
+ [3, 2, 4.0, 3]], dtype=xp.int64)
486
+ assert_(is_valid_im(R) is False)
487
+ assert_raises(TypeError, is_valid_im, R, throw=True)
488
+
489
+ def test_is_valid_im_various_size(self, xp):
490
+ for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
491
+ (1, 4, True), (2, 4, True)]:
492
+ self.check_is_valid_im_various_size(nrow, ncol, valid, xp)
493
+
494
+ def check_is_valid_im_various_size(self, nrow, ncol, valid, xp):
495
+ # Tests is_valid_im(R) with linkage matrices of various sizes
496
+ R = xp.asarray([[0, 1, 3.0, 2, 5],
497
+ [3, 2, 4.0, 3, 3]], dtype=xp.float64)
498
+ R = R[:nrow, :ncol]
499
+ assert_(is_valid_im(R) == valid)
500
+ if not valid:
501
+ assert_raises(ValueError, is_valid_im, R, throw=True)
502
+
503
+ def test_is_valid_im_empty(self, xp):
504
+ # Tests is_valid_im(R) with empty inconsistency matrix.
505
+ R = xp.zeros((0, 4), dtype=xp.float64)
506
+ assert_(is_valid_im(R) is False)
507
+ assert_raises(ValueError, is_valid_im, R, throw=True)
508
+
509
+ def test_is_valid_im_4_and_up(self, xp):
510
+ # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
511
+ # (step size 3).
512
+ for i in range(4, 15, 3):
513
+ y = np.random.rand(i*(i-1)//2)
514
+ y = xp.asarray(y)
515
+ Z = linkage(y)
516
+ R = inconsistent(Z)
517
+ assert_(is_valid_im(R) is True)
518
+
519
+ def test_is_valid_im_4_and_up_neg_index_left(self, xp):
520
+ # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
521
+ # (step size 3) with negative link height means.
522
+ for i in range(4, 15, 3):
523
+ y = np.random.rand(i*(i-1)//2)
524
+ y = xp.asarray(y)
525
+ Z = linkage(y)
526
+ R = inconsistent(Z)
527
+ R[i//2,0] = -2.0
528
+ assert_(is_valid_im(R) is False)
529
+ assert_raises(ValueError, is_valid_im, R, throw=True)
530
+
531
+ def test_is_valid_im_4_and_up_neg_index_right(self, xp):
532
+ # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
533
+ # (step size 3) with negative link height standard deviations.
534
+ for i in range(4, 15, 3):
535
+ y = np.random.rand(i*(i-1)//2)
536
+ y = xp.asarray(y)
537
+ Z = linkage(y)
538
+ R = inconsistent(Z)
539
+ R[i//2,1] = -2.0
540
+ assert_(is_valid_im(R) is False)
541
+ assert_raises(ValueError, is_valid_im, R, throw=True)
542
+
543
+ def test_is_valid_im_4_and_up_neg_dist(self, xp):
544
+ # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
545
+ # (step size 3) with negative link counts.
546
+ for i in range(4, 15, 3):
547
+ y = np.random.rand(i*(i-1)//2)
548
+ y = xp.asarray(y)
549
+ Z = linkage(y)
550
+ R = inconsistent(Z)
551
+ R[i//2,2] = -0.5
552
+ assert_(is_valid_im(R) is False)
553
+ assert_raises(ValueError, is_valid_im, R, throw=True)
554
+
555
+
556
+ class TestNumObsLinkage:
557
+
558
+ @skip_if_array_api(cpu_only=True)
559
+ def test_num_obs_linkage_empty(self, xp):
560
+ # Tests num_obs_linkage(Z) with empty linkage.
561
+ Z = xp.zeros((0, 4), dtype=xp.float64)
562
+ assert_raises(ValueError, num_obs_linkage, Z)
563
+
564
+ def test_num_obs_linkage_1x4(self, xp):
565
+ # Tests num_obs_linkage(Z) on linkage over 2 observations.
566
+ Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64)
567
+ assert_equal(num_obs_linkage(Z), 2)
568
+
569
+ def test_num_obs_linkage_2x4(self, xp):
570
+ # Tests num_obs_linkage(Z) on linkage over 3 observations.
571
+ Z = xp.asarray([[0, 1, 3.0, 2],
572
+ [3, 2, 4.0, 3]], dtype=xp.float64)
573
+ assert_equal(num_obs_linkage(Z), 3)
574
+
575
+ @skip_if_array_api(cpu_only=True)
576
+ def test_num_obs_linkage_4_and_up(self, xp):
577
+ # Tests num_obs_linkage(Z) on linkage on observation sets between sizes
578
+ # 4 and 15 (step size 3).
579
+ for i in range(4, 15, 3):
580
+ y = np.random.rand(i*(i-1)//2)
581
+ y = xp.asarray(y)
582
+ Z = linkage(y)
583
+ assert_equal(num_obs_linkage(Z), i)
584
+
585
+
586
+ @skip_if_array_api(cpu_only=True)
587
+ class TestLeavesList:
588
+
589
+ def test_leaves_list_1x4(self, xp):
590
+ # Tests leaves_list(Z) on a 1x4 linkage.
591
+ Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64)
592
+ to_tree(Z)
593
+ assert_allclose(leaves_list(Z), [0, 1], rtol=1e-15)
594
+
595
+ def test_leaves_list_2x4(self, xp):
596
+ # Tests leaves_list(Z) on a 2x4 linkage.
597
+ Z = xp.asarray([[0, 1, 3.0, 2],
598
+ [3, 2, 4.0, 3]], dtype=xp.float64)
599
+ to_tree(Z)
600
+ assert_allclose(leaves_list(Z), [0, 1, 2], rtol=1e-15)
601
+
602
+ def test_leaves_list_Q(self, xp):
603
+ for method in ['single', 'complete', 'average', 'weighted', 'centroid',
604
+ 'median', 'ward']:
605
+ self.check_leaves_list_Q(method, xp)
606
+
607
+ def check_leaves_list_Q(self, method, xp):
608
+ # Tests leaves_list(Z) on the Q data set
609
+ X = xp.asarray(hierarchy_test_data.Q_X)
610
+ Z = linkage(X, method)
611
+ node = to_tree(Z)
612
+ assert_allclose(node.pre_order(), leaves_list(Z), rtol=1e-15)
613
+
614
+ def test_Q_subtree_pre_order(self, xp):
615
+ # Tests that pre_order() works when called on sub-trees.
616
+ X = xp.asarray(hierarchy_test_data.Q_X)
617
+ Z = linkage(X, 'single')
618
+ node = to_tree(Z)
619
+ assert_allclose(node.pre_order(), (node.get_left().pre_order()
620
+ + node.get_right().pre_order()),
621
+ rtol=1e-15)
622
+
623
+
624
+ @skip_if_array_api(cpu_only=True)
625
+ class TestCorrespond:
626
+
627
+ def test_correspond_empty(self, xp):
628
+ # Tests correspond(Z, y) with empty linkage and condensed distance matrix.
629
+ y = xp.zeros((0,), dtype=xp.float64)
630
+ Z = xp.zeros((0,4), dtype=xp.float64)
631
+ assert_raises(ValueError, correspond, Z, y)
632
+
633
+ def test_correspond_2_and_up(self, xp):
634
+ # Tests correspond(Z, y) on linkage and CDMs over observation sets of
635
+ # different sizes.
636
+ for i in range(2, 4):
637
+ y = np.random.rand(i*(i-1)//2)
638
+ y = xp.asarray(y)
639
+ Z = linkage(y)
640
+ assert_(correspond(Z, y))
641
+ for i in range(4, 15, 3):
642
+ y = np.random.rand(i*(i-1)//2)
643
+ y = xp.asarray(y)
644
+ Z = linkage(y)
645
+ assert_(correspond(Z, y))
646
+
647
+ def test_correspond_4_and_up(self, xp):
648
+ # Tests correspond(Z, y) on linkage and CDMs over observation sets of
649
+ # different sizes. Correspondence should be false.
650
+ for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
651
+ list(zip(list(range(3, 5)), list(range(2, 4))))):
652
+ y = np.random.rand(i*(i-1)//2)
653
+ y2 = np.random.rand(j*(j-1)//2)
654
+ y = xp.asarray(y)
655
+ y2 = xp.asarray(y2)
656
+ Z = linkage(y)
657
+ Z2 = linkage(y2)
658
+ assert not correspond(Z, y2)
659
+ assert not correspond(Z2, y)
660
+
661
+ def test_correspond_4_and_up_2(self, xp):
662
+ # Tests correspond(Z, y) on linkage and CDMs over observation sets of
663
+ # different sizes. Correspondence should be false.
664
+ for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
665
+ list(zip(list(range(2, 7)), list(range(16, 21))))):
666
+ y = np.random.rand(i*(i-1)//2)
667
+ y2 = np.random.rand(j*(j-1)//2)
668
+ y = xp.asarray(y)
669
+ y2 = xp.asarray(y2)
670
+ Z = linkage(y)
671
+ Z2 = linkage(y2)
672
+ assert not correspond(Z, y2)
673
+ assert not correspond(Z2, y)
674
+
675
+ def test_num_obs_linkage_multi_matrix(self, xp):
676
+ # Tests num_obs_linkage with observation matrices of multiple sizes.
677
+ for n in range(2, 10):
678
+ X = np.random.rand(n, 4)
679
+ Y = pdist(X)
680
+ Y = xp.asarray(Y)
681
+ Z = linkage(Y)
682
+ assert_equal(num_obs_linkage(Z), n)
683
+
684
+
685
+ @skip_if_array_api(cpu_only=True)
686
+ class TestIsMonotonic:
687
+
688
+ def test_is_monotonic_empty(self, xp):
689
+ # Tests is_monotonic(Z) on an empty linkage.
690
+ Z = xp.zeros((0, 4), dtype=xp.float64)
691
+ assert_raises(ValueError, is_monotonic, Z)
692
+
693
+ def test_is_monotonic_1x4(self, xp):
694
+ # Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
695
+ Z = xp.asarray([[0, 1, 0.3, 2]], dtype=xp.float64)
696
+ assert is_monotonic(Z)
697
+
698
+ def test_is_monotonic_2x4_T(self, xp):
699
+ # Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
700
+ Z = xp.asarray([[0, 1, 0.3, 2],
701
+ [2, 3, 0.4, 3]], dtype=xp.float64)
702
+ assert is_monotonic(Z)
703
+
704
+ def test_is_monotonic_2x4_F(self, xp):
705
+ # Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
706
+ Z = xp.asarray([[0, 1, 0.4, 2],
707
+ [2, 3, 0.3, 3]], dtype=xp.float64)
708
+ assert not is_monotonic(Z)
709
+
710
+ def test_is_monotonic_3x4_T(self, xp):
711
+ # Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
712
+ Z = xp.asarray([[0, 1, 0.3, 2],
713
+ [2, 3, 0.4, 2],
714
+ [4, 5, 0.6, 4]], dtype=xp.float64)
715
+ assert is_monotonic(Z)
716
+
717
+ def test_is_monotonic_3x4_F1(self, xp):
718
+ # Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
719
+ Z = xp.asarray([[0, 1, 0.3, 2],
720
+ [2, 3, 0.2, 2],
721
+ [4, 5, 0.6, 4]], dtype=xp.float64)
722
+ assert not is_monotonic(Z)
723
+
724
+ def test_is_monotonic_3x4_F2(self, xp):
725
+ # Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
726
+ Z = xp.asarray([[0, 1, 0.8, 2],
727
+ [2, 3, 0.4, 2],
728
+ [4, 5, 0.6, 4]], dtype=xp.float64)
729
+ assert not is_monotonic(Z)
730
+
731
+ def test_is_monotonic_3x4_F3(self, xp):
732
+ # Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
733
+ Z = xp.asarray([[0, 1, 0.3, 2],
734
+ [2, 3, 0.4, 2],
735
+ [4, 5, 0.2, 4]], dtype=xp.float64)
736
+ assert not is_monotonic(Z)
737
+
738
+ def test_is_monotonic_tdist_linkage1(self, xp):
739
+ # Tests is_monotonic(Z) on clustering generated by single linkage on
740
+ # tdist data set. Expecting True.
741
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
742
+ assert is_monotonic(Z)
743
+
744
+ def test_is_monotonic_tdist_linkage2(self, xp):
745
+ # Tests is_monotonic(Z) on clustering generated by single linkage on
746
+ # tdist data set. Perturbing. Expecting False.
747
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
748
+ Z[2,2] = 0.0
749
+ assert not is_monotonic(Z)
750
+
751
+ def test_is_monotonic_Q_linkage(self, xp):
752
+ # Tests is_monotonic(Z) on clustering generated by single linkage on
753
+ # Q data set. Expecting True.
754
+ X = xp.asarray(hierarchy_test_data.Q_X)
755
+ Z = linkage(X, 'single')
756
+ assert is_monotonic(Z)
757
+
758
+
759
+ @skip_if_array_api(cpu_only=True)
760
+ class TestMaxDists:
761
+
762
+ def test_maxdists_empty_linkage(self, xp):
763
+ # Tests maxdists(Z) on empty linkage. Expecting exception.
764
+ Z = xp.zeros((0, 4), dtype=xp.float64)
765
+ assert_raises(ValueError, maxdists, Z)
766
+
767
+ def test_maxdists_one_cluster_linkage(self, xp):
768
+ # Tests maxdists(Z) on linkage with one cluster.
769
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
770
+ MD = maxdists(Z)
771
+ expectedMD = calculate_maximum_distances(Z, xp)
772
+ xp_assert_close(MD, expectedMD, atol=1e-15)
773
+
774
+ def test_maxdists_Q_linkage(self, xp):
775
+ for method in ['single', 'complete', 'ward', 'centroid', 'median']:
776
+ self.check_maxdists_Q_linkage(method, xp)
777
+
778
+ def check_maxdists_Q_linkage(self, method, xp):
779
+ # Tests maxdists(Z) on the Q data set
780
+ X = xp.asarray(hierarchy_test_data.Q_X)
781
+ Z = linkage(X, method)
782
+ MD = maxdists(Z)
783
+ expectedMD = calculate_maximum_distances(Z, xp)
784
+ xp_assert_close(MD, expectedMD, atol=1e-15)
785
+
786
+
787
+ class TestMaxInconsts:
788
+
789
+ @skip_if_array_api(cpu_only=True)
790
+ def test_maxinconsts_empty_linkage(self, xp):
791
+ # Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
792
+ Z = xp.zeros((0, 4), dtype=xp.float64)
793
+ R = xp.zeros((0, 4), dtype=xp.float64)
794
+ assert_raises(ValueError, maxinconsts, Z, R)
795
+
796
+ def test_maxinconsts_difrow_linkage(self, xp):
797
+ # Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
798
+ # different numbers of clusters. Expecting exception.
799
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
800
+ R = np.random.rand(2, 4)
801
+ R = xp.asarray(R)
802
+ assert_raises(ValueError, maxinconsts, Z, R)
803
+
804
+ @skip_if_array_api(cpu_only=True)
805
+ def test_maxinconsts_one_cluster_linkage(self, xp):
806
+ # Tests maxinconsts(Z, R) on linkage with one cluster.
807
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
808
+ R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64)
809
+ MD = maxinconsts(Z, R)
810
+ expectedMD = calculate_maximum_inconsistencies(Z, R, xp=xp)
811
+ xp_assert_close(MD, expectedMD, atol=1e-15)
812
+
813
+ @skip_if_array_api(cpu_only=True)
814
+ def test_maxinconsts_Q_linkage(self, xp):
815
+ for method in ['single', 'complete', 'ward', 'centroid', 'median']:
816
+ self.check_maxinconsts_Q_linkage(method, xp)
817
+
818
+ def check_maxinconsts_Q_linkage(self, method, xp):
819
+ # Tests maxinconsts(Z, R) on the Q data set
820
+ X = xp.asarray(hierarchy_test_data.Q_X)
821
+ Z = linkage(X, method)
822
+ R = inconsistent(Z)
823
+ MD = maxinconsts(Z, R)
824
+ expectedMD = calculate_maximum_inconsistencies(Z, R, xp=xp)
825
+ xp_assert_close(MD, expectedMD, atol=1e-15)
826
+
827
+
828
+ class TestMaxRStat:
829
+
830
+ def test_maxRstat_invalid_index(self, xp):
831
+ for i in [3.3, -1, 4]:
832
+ self.check_maxRstat_invalid_index(i, xp)
833
+
834
+ def check_maxRstat_invalid_index(self, i, xp):
835
+ # Tests maxRstat(Z, R, i). Expecting exception.
836
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
837
+ R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64)
838
+ if isinstance(i, int):
839
+ assert_raises(ValueError, maxRstat, Z, R, i)
840
+ else:
841
+ assert_raises(TypeError, maxRstat, Z, R, i)
842
+
843
+ @skip_if_array_api(cpu_only=True)
844
+ def test_maxRstat_empty_linkage(self, xp):
845
+ for i in range(4):
846
+ self.check_maxRstat_empty_linkage(i, xp)
847
+
848
+ def check_maxRstat_empty_linkage(self, i, xp):
849
+ # Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
850
+ Z = xp.zeros((0, 4), dtype=xp.float64)
851
+ R = xp.zeros((0, 4), dtype=xp.float64)
852
+ assert_raises(ValueError, maxRstat, Z, R, i)
853
+
854
+ def test_maxRstat_difrow_linkage(self, xp):
855
+ for i in range(4):
856
+ self.check_maxRstat_difrow_linkage(i, xp)
857
+
858
+ def check_maxRstat_difrow_linkage(self, i, xp):
859
+ # Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
860
+ # different numbers of clusters. Expecting exception.
861
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
862
+ R = np.random.rand(2, 4)
863
+ R = xp.asarray(R)
864
+ assert_raises(ValueError, maxRstat, Z, R, i)
865
+
866
+ @skip_if_array_api(cpu_only=True)
867
+ def test_maxRstat_one_cluster_linkage(self, xp):
868
+ for i in range(4):
869
+ self.check_maxRstat_one_cluster_linkage(i, xp)
870
+
871
+ def check_maxRstat_one_cluster_linkage(self, i, xp):
872
+ # Tests maxRstat(Z, R, i) on linkage with one cluster.
873
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
874
+ R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64)
875
+ MD = maxRstat(Z, R, 1)
876
+ expectedMD = calculate_maximum_inconsistencies(Z, R, 1, xp)
877
+ xp_assert_close(MD, expectedMD, atol=1e-15)
878
+
879
+ @skip_if_array_api(cpu_only=True)
880
+ def test_maxRstat_Q_linkage(self, xp):
881
+ for method in ['single', 'complete', 'ward', 'centroid', 'median']:
882
+ for i in range(4):
883
+ self.check_maxRstat_Q_linkage(method, i, xp)
884
+
885
+ def check_maxRstat_Q_linkage(self, method, i, xp):
886
+ # Tests maxRstat(Z, R, i) on the Q data set
887
+ X = xp.asarray(hierarchy_test_data.Q_X)
888
+ Z = linkage(X, method)
889
+ R = inconsistent(Z)
890
+ MD = maxRstat(Z, R, 1)
891
+ expectedMD = calculate_maximum_inconsistencies(Z, R, 1, xp)
892
+ xp_assert_close(MD, expectedMD, atol=1e-15)
893
+
894
+
895
+ @skip_if_array_api(cpu_only=True)
896
+ class TestDendrogram:
897
+
898
+ def test_dendrogram_single_linkage_tdist(self, xp):
899
+ # Tests dendrogram calculation on single linkage of the tdist data set.
900
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
901
+ R = dendrogram(Z, no_plot=True)
902
+ leaves = R["leaves"]
903
+ assert_equal(leaves, [2, 5, 1, 0, 3, 4])
904
+
905
+ def test_valid_orientation(self, xp):
906
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
907
+ assert_raises(ValueError, dendrogram, Z, orientation="foo")
908
+
909
+ def test_labels_as_array_or_list(self, xp):
910
+ # test for gh-12418
911
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
912
+ labels = xp.asarray([1, 3, 2, 6, 4, 5])
913
+ result1 = dendrogram(Z, labels=labels, no_plot=True)
914
+ result2 = dendrogram(Z, labels=list(labels), no_plot=True)
915
+ assert result1 == result2
916
+
917
+ @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
918
+ def test_valid_label_size(self, xp):
919
+ link = xp.asarray([
920
+ [0, 1, 1.0, 4],
921
+ [2, 3, 1.0, 5],
922
+ [4, 5, 2.0, 6],
923
+ ])
924
+ plt.figure()
925
+ with pytest.raises(ValueError) as exc_info:
926
+ dendrogram(link, labels=list(range(100)))
927
+ assert "Dimensions of Z and labels must be consistent."\
928
+ in str(exc_info.value)
929
+
930
+ with pytest.raises(
931
+ ValueError,
932
+ match="Dimensions of Z and labels must be consistent."):
933
+ dendrogram(link, labels=[])
934
+
935
+ plt.close()
936
+
937
+ @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
938
+ def test_dendrogram_plot(self, xp):
939
+ for orientation in ['top', 'bottom', 'left', 'right']:
940
+ self.check_dendrogram_plot(orientation, xp)
941
+
942
+ def check_dendrogram_plot(self, orientation, xp):
943
+ # Tests dendrogram plotting.
944
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
945
+ expected = {'color_list': ['C1', 'C0', 'C0', 'C0', 'C0'],
946
+ 'dcoord': [[0.0, 138.0, 138.0, 0.0],
947
+ [0.0, 219.0, 219.0, 0.0],
948
+ [0.0, 255.0, 255.0, 219.0],
949
+ [0.0, 268.0, 268.0, 255.0],
950
+ [138.0, 295.0, 295.0, 268.0]],
951
+ 'icoord': [[5.0, 5.0, 15.0, 15.0],
952
+ [45.0, 45.0, 55.0, 55.0],
953
+ [35.0, 35.0, 50.0, 50.0],
954
+ [25.0, 25.0, 42.5, 42.5],
955
+ [10.0, 10.0, 33.75, 33.75]],
956
+ 'ivl': ['2', '5', '1', '0', '3', '4'],
957
+ 'leaves': [2, 5, 1, 0, 3, 4],
958
+ 'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0', 'C0'],
959
+ }
960
+
961
+ fig = plt.figure()
962
+ ax = fig.add_subplot(221)
963
+
964
+ # test that dendrogram accepts ax keyword
965
+ R1 = dendrogram(Z, ax=ax, orientation=orientation)
966
+ R1['dcoord'] = np.asarray(R1['dcoord'])
967
+ assert_equal(R1, expected)
968
+
969
+ # test that dendrogram accepts and handle the leaf_font_size and
970
+ # leaf_rotation keywords
971
+ dendrogram(Z, ax=ax, orientation=orientation,
972
+ leaf_font_size=20, leaf_rotation=90)
973
+ testlabel = (
974
+ ax.get_xticklabels()[0]
975
+ if orientation in ['top', 'bottom']
976
+ else ax.get_yticklabels()[0]
977
+ )
978
+ assert_equal(testlabel.get_rotation(), 90)
979
+ assert_equal(testlabel.get_size(), 20)
980
+ dendrogram(Z, ax=ax, orientation=orientation,
981
+ leaf_rotation=90)
982
+ testlabel = (
983
+ ax.get_xticklabels()[0]
984
+ if orientation in ['top', 'bottom']
985
+ else ax.get_yticklabels()[0]
986
+ )
987
+ assert_equal(testlabel.get_rotation(), 90)
988
+ dendrogram(Z, ax=ax, orientation=orientation,
989
+ leaf_font_size=20)
990
+ testlabel = (
991
+ ax.get_xticklabels()[0]
992
+ if orientation in ['top', 'bottom']
993
+ else ax.get_yticklabels()[0]
994
+ )
995
+ assert_equal(testlabel.get_size(), 20)
996
+ plt.close()
997
+
998
+ # test plotting to gca (will import pylab)
999
+ R2 = dendrogram(Z, orientation=orientation)
1000
+ plt.close()
1001
+ R2['dcoord'] = np.asarray(R2['dcoord'])
1002
+ assert_equal(R2, expected)
1003
+
1004
+ @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
1005
+ def test_dendrogram_truncate_mode(self, xp):
1006
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
1007
+
1008
+ R = dendrogram(Z, 2, 'lastp', show_contracted=True)
1009
+ plt.close()
1010
+ R['dcoord'] = np.asarray(R['dcoord'])
1011
+ assert_equal(R, {'color_list': ['C0'],
1012
+ 'dcoord': [[0.0, 295.0, 295.0, 0.0]],
1013
+ 'icoord': [[5.0, 5.0, 15.0, 15.0]],
1014
+ 'ivl': ['(2)', '(4)'],
1015
+ 'leaves': [6, 9],
1016
+ 'leaves_color_list': ['C0', 'C0'],
1017
+ })
1018
+
1019
+ R = dendrogram(Z, 2, 'mtica', show_contracted=True)
1020
+ plt.close()
1021
+ R['dcoord'] = np.asarray(R['dcoord'])
1022
+ assert_equal(R, {'color_list': ['C1', 'C0', 'C0', 'C0'],
1023
+ 'dcoord': [[0.0, 138.0, 138.0, 0.0],
1024
+ [0.0, 255.0, 255.0, 0.0],
1025
+ [0.0, 268.0, 268.0, 255.0],
1026
+ [138.0, 295.0, 295.0, 268.0]],
1027
+ 'icoord': [[5.0, 5.0, 15.0, 15.0],
1028
+ [35.0, 35.0, 45.0, 45.0],
1029
+ [25.0, 25.0, 40.0, 40.0],
1030
+ [10.0, 10.0, 32.5, 32.5]],
1031
+ 'ivl': ['2', '5', '1', '0', '(2)'],
1032
+ 'leaves': [2, 5, 1, 0, 7],
1033
+ 'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0'],
1034
+ })
1035
+
1036
+ def test_dendrogram_colors(self, xp):
+ # Tests dendrogram plots with alternate colors
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
+
+ set_link_color_palette(['c', 'm', 'y', 'k'])
+ R = dendrogram(Z, no_plot=True,
+ above_threshold_color='g', color_threshold=250)
+ set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
+
+ color_list = R['color_list']
+ assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
+
+ # reset color palette (global list)
+ set_link_color_palette(None)
+
+ def test_dendrogram_leaf_colors_zero_dist(self, xp):
+ # tests that the colors of the leaves are correct for a tree
+ # with two identical points
+ x = xp.asarray([[1, 0, 0],
+ [0, 0, 1],
+ [0, 2, 0],
+ [0, 0, 1],
+ [0, 1, 0],
+ [0, 1, 0]])
+ z = linkage(x, "single")
+ d = dendrogram(z, no_plot=True)
+ exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2']
+ colors = d["leaves_color_list"]
+ assert_equal(colors, exp_colors)
+
+ def test_dendrogram_leaf_colors(self, xp):
+ # tests that the colors are correct for a tree
+ # with two near points ((0, 0, 1.1) and (0, 0, 1))
+ x = xp.asarray([[1, 0, 0],
+ [0, 0, 1.1],
+ [0, 2, 0],
+ [0, 0, 1],
+ [0, 1, 0],
+ [0, 1, 0]])
+ z = linkage(x, "single")
+ d = dendrogram(z, no_plot=True)
+ exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2']
+ colors = d["leaves_color_list"]
+ assert_equal(colors, exp_colors)
+
+
+ def calculate_maximum_distances(Z, xp):
+ # Used for testing correctness of maxdists.
+ n = Z.shape[0] + 1
+ B = xp.zeros((n-1,), dtype=Z.dtype)
+ q = xp.zeros((3,))
+ for i in range(0, n - 1):
+ q[:] = 0.0
+ left = Z[i, 0]
+ right = Z[i, 1]
+ if left >= n:
+ q[0] = B[xp.asarray(left, dtype=xp.int64) - n]
+ if right >= n:
+ q[1] = B[xp.asarray(right, dtype=xp.int64) - n]
+ q[2] = Z[i, 2]
+ B[i] = xp.max(q)
+ return B
+
+
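+ # Note: the helper above and the one below share the same bottom-up
+ # recurrence over the linkage matrix Z: for merge i,
+ # B[i] = max(B[left child], B[right child], statistic of merge i),
+ # so child values are always computed before their parent is visited.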
+ def calculate_maximum_inconsistencies(Z, R, k=3, xp=np):
+ # Used for testing correctness of maxinconsts.
+ n = Z.shape[0] + 1
+ dtype = xp.result_type(Z, R)
+ B = xp.zeros((n-1,), dtype=dtype)
+ q = xp.zeros((3,))
+ for i in range(0, n - 1):
+ q[:] = 0.0
+ left = Z[i, 0]
+ right = Z[i, 1]
+ if left >= n:
+ q[0] = B[xp.asarray(left, dtype=xp.int64) - n]
+ if right >= n:
+ q[1] = B[xp.asarray(right, dtype=xp.int64) - n]
+ q[2] = R[i, k]
+ B[i] = xp.max(q)
+ return B
+
+
+ @skip_if_array_api(cpu_only=True)
+ def test_unsupported_uncondensed_distance_matrix_linkage_warning(xp):
+ assert_warns(ClusterWarning, linkage, xp.asarray([[0, 1], [1, 0]]))
+
+
+ def test_euclidean_linkage_value_error(xp):
+ for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS:
+ assert_raises(ValueError, linkage, xp.asarray([[1, 1], [1, 1]]),
+ method=method, metric='cityblock')
+
+
+ @skip_if_array_api(cpu_only=True)
+ def test_2x2_linkage(xp):
+ Z1 = linkage(xp.asarray([1]), method='single', metric='euclidean')
+ Z2 = linkage(xp.asarray([[0, 1], [0, 0]]), method='single', metric='euclidean')
+ xp_assert_close(Z1, Z2, rtol=1e-15)
+
+
+ @skip_if_array_api(cpu_only=True)
+ def test_node_compare(xp):
+ np.random.seed(23)
+ nobs = 50
+ X = np.random.randn(nobs, 4)
+ X = xp.asarray(X)
+ Z = scipy.cluster.hierarchy.ward(X)
+ tree = to_tree(Z)
+ assert_(tree > tree.get_left())
+ assert_(tree.get_right() > tree.get_left())
+ assert_(tree.get_right() == tree.get_right())
+ assert_(tree.get_right() != tree.get_left())
+
+
+ @skip_if_array_api(np_only=True, reasons=['`cut_tree` uses non-standard indexing'])
+ def test_cut_tree(xp):
+ np.random.seed(23)
+ nobs = 50
+ X = np.random.randn(nobs, 4)
+ X = xp.asarray(X)
+ Z = scipy.cluster.hierarchy.ward(X)
+ cutree = cut_tree(Z)
+
+ # cutree.dtype varies between int32 and int64 over platforms
+ xp_assert_close(cutree[:, 0], xp.arange(nobs), rtol=1e-15, check_dtype=False)
+ xp_assert_close(cutree[:, -1], xp.zeros(nobs), rtol=1e-15, check_dtype=False)
+ assert_equal(np.asarray(cutree).max(0), np.arange(nobs - 1, -1, -1))
+
+ xp_assert_close(cutree[:, [-5]], cut_tree(Z, n_clusters=5), rtol=1e-15)
+ xp_assert_close(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10]), rtol=1e-15)
+ xp_assert_close(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5]), rtol=1e-15)
+
+ nodes = _order_cluster_tree(Z)
+ heights = xp.asarray([node.dist for node in nodes])
+
+ xp_assert_close(cutree[:, np.searchsorted(heights, [5])],
+ cut_tree(Z, height=5), rtol=1e-15)
+ xp_assert_close(cutree[:, np.searchsorted(heights, [5, 10])],
+ cut_tree(Z, height=[5, 10]), rtol=1e-15)
+ xp_assert_close(cutree[:, np.searchsorted(heights, [10, 5])],
+ cut_tree(Z, height=[10, 5]), rtol=1e-15)
+
+
+ @skip_if_array_api(cpu_only=True)
+ def test_optimal_leaf_ordering(xp):
+ # test with the distance vector y
+ Z = optimal_leaf_ordering(linkage(xp.asarray(hierarchy_test_data.ytdist)),
+ xp.asarray(hierarchy_test_data.ytdist))
+ expectedZ = hierarchy_test_data.linkage_ytdist_single_olo
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10)
+
+ # test with the observation matrix X
+ Z = optimal_leaf_ordering(linkage(xp.asarray(hierarchy_test_data.X), 'ward'),
+ xp.asarray(hierarchy_test_data.X))
+ expectedZ = hierarchy_test_data.linkage_X_ward_olo
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06)
+
+
+ @skip_if_array_api(np_only=True, reasons=['`Heap` only supports NumPy backend'])
+ def test_Heap(xp):
+ values = xp.asarray([2, -1, 0, -1.5, 3])
+ heap = Heap(values)
+
+ pair = heap.get_min()
+ assert_equal(pair['key'], 3)
+ assert_equal(pair['value'], -1.5)
+
+ heap.remove_min()
+ pair = heap.get_min()
+ assert_equal(pair['key'], 1)
+ assert_equal(pair['value'], -1)
+
+ heap.change_value(1, 2.5)
+ pair = heap.get_min()
+ assert_equal(pair['key'], 2)
+ assert_equal(pair['value'], 0)
+
+ heap.remove_min()
+ heap.remove_min()
+
+ heap.change_value(1, 10)
+ pair = heap.get_min()
+ assert_equal(pair['key'], 4)
+ assert_equal(pair['value'], 3)
+
+ heap.remove_min()
+ pair = heap.get_min()
+ assert_equal(pair['key'], 1)
+ assert_equal(pair['value'], 10)
venv/lib/python3.10/site-packages/scipy/cluster/vq.py ADDED
@@ -0,0 +1,835 @@
+ """
+ K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
+ ====================================================================
+
+ Provides routines for k-means clustering, generating code books
+ from k-means models and quantizing vectors by comparing them with
+ centroids in a code book.
+
+ .. autosummary::
+ :toctree: generated/
+
+ whiten -- Normalize a group of observations so each feature has unit variance
+ vq -- Calculate code book membership of a set of observation vectors
+ kmeans -- Perform k-means on a set of observation vectors forming k clusters
+ kmeans2 -- A different implementation of k-means with more methods
+ for initializing centroids
+
+ Background information
+ ----------------------
+ The k-means algorithm takes as input the number of clusters to
+ generate, k, and a set of observation vectors to cluster. It
+ returns a set of centroids, one for each of the k clusters. An
+ observation vector is classified with the cluster number or
+ centroid index of the centroid closest to it.
+
+ A vector v belongs to cluster i if it is closer to centroid i than
+ any other centroid. If v belongs to i, we say centroid i is the
+ dominating centroid of v. The k-means algorithm tries to
+ minimize distortion, which is defined as the sum of the squared distances
+ between each observation vector and its dominating centroid.
+ The minimization is achieved by iteratively reclassifying
+ the observations into clusters and recalculating the centroids until
+ a configuration is reached in which the centroids are stable. One can
+ also define a maximum number of iterations.
+
+ Since vector quantization is a natural application for k-means,
+ information theory terminology is often used. The centroid index
+ or cluster index is also referred to as a "code" and the table
+ mapping codes to centroids, and vice versa, is often referred to as a
+ "code book". The result of k-means, a set of centroids, can be
+ used to quantize vectors. Quantization aims to find an encoding of
+ vectors that reduces the expected distortion.
+
+ All routines expect obs to be an M by N array, where the rows are
+ the observation vectors. The codebook is a k by N array, where the
+ ith row is the centroid of code word i. The observation vectors
+ and centroids have the same feature dimension.
+
+ As an example, suppose we wish to compress a 24-bit color image
+ (each pixel is represented by one byte for red, one for blue, and
+ one for green) before sending it over the web. By using a smaller
+ 8-bit encoding, we can reduce the amount of data by two
+ thirds. Ideally, the colors for each of the 256 possible 8-bit
+ encoding values should be chosen to minimize distortion of the
+ color. Running k-means with k=256 generates a code book of 256
+ codes, which fills up all possible 8-bit sequences. Instead of
+ sending a 3-byte value for each pixel, the 8-bit centroid index
+ (or code word) of the dominating centroid is transmitted. The code
+ book is also sent over the wire so each 8-bit code can be
+ translated back to a 24-bit pixel value representation. If the
+ image of interest was of an ocean, we would expect many 24-bit
+ blues to be represented by 8-bit codes. If it was an image of a
+ human face, more flesh-tone colors would be represented in the
+ code book.
+
+ """
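+ # A minimal usage sketch (not part of the original module) of the pipeline
+ # described in the docstring above: whiten the observations, build a code
+ # book with k-means, then assign codes with vq. The arrays are made-up
+ # sample data, for illustration only:
+ #
+ # >>> import numpy as np
+ # >>> from scipy.cluster.vq import whiten, kmeans, vq
+ # >>> obs = np.array([[1.0, 1.0], [1.2, 0.9], [8.0, 8.2], [7.9, 8.1]])
+ # >>> w = whiten(obs)                       # unit variance per feature
+ # >>> code_book, distortion = kmeans(w, 2)  # two centroids
+ # >>> codes, dists = vq(w, code_book)       # nearest-centroid label per row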
+ import warnings
+ import numpy as np
+ from collections import deque
+ from scipy._lib._array_api import (
+ _asarray, array_namespace, size, atleast_nd, copy, cov
+ )
+ from scipy._lib._util import check_random_state, rng_integers
+ from scipy.spatial.distance import cdist
+
+ from . import _vq
+
+ __docformat__ = 'restructuredtext'
+
+ __all__ = ['whiten', 'vq', 'kmeans', 'kmeans2']
+
+
+ class ClusterError(Exception):
+ pass
+
+
+ def whiten(obs, check_finite=True):
+ """
+ Normalize a group of observations on a per feature basis.
+
+ Before running k-means, it is beneficial to rescale each feature
+ dimension of the observation set by its standard deviation (i.e. "whiten"
+ it - as in "white noise" where each frequency has equal power).
+ Each feature is divided by its standard deviation across all observations
+ to give it unit variance.
+
+ Parameters
+ ----------
+ obs : ndarray
+ Each row of the array is an observation. The
+ columns are the features seen during each observation.
+
+ >>> # f0 f1 f2
+ >>> obs = [[ 1., 1., 1.], #o0
+ ... [ 2., 2., 2.], #o1
+ ... [ 3., 3., 3.], #o2
+ ... [ 4., 4., 4.]] #o3
+
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+ Default: True
+
+ Returns
+ -------
+ result : ndarray
+ Contains the values in `obs` scaled by the standard deviation
+ of each column.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from scipy.cluster.vq import whiten
+ >>> features = np.array([[1.9, 2.3, 1.7],
+ ... [1.5, 2.5, 2.2],
+ ... [0.8, 0.6, 1.7,]])
+ >>> whiten(features)
+ array([[ 4.17944278, 2.69811351, 7.21248917],
+ [ 3.29956009, 2.93273208, 9.33380951],
+ [ 1.75976538, 0.7038557 , 7.21248917]])
+
+ """
+ xp = array_namespace(obs)
+ obs = _asarray(obs, check_finite=check_finite, xp=xp)
+ std_dev = xp.std(obs, axis=0)
+ zero_std_mask = std_dev == 0
+ if xp.any(zero_std_mask):
+ std_dev[zero_std_mask] = 1.0
+ warnings.warn("Some columns have standard deviation zero. "
+ "The values of these columns will not change.",
+ RuntimeWarning, stacklevel=2)
+ return obs / std_dev
+
+
+ def vq(obs, code_book, check_finite=True):
+ """
+ Assign codes from a code book to observations.
+
+ Assigns a code from a code book to each observation. Each
+ observation vector in the 'M' by 'N' `obs` array is compared with the
+ centroids in the code book and assigned the code of the closest
+ centroid.
+
+ The features in `obs` should have unit variance, which can be
+ achieved by passing them through the whiten function. The code
+ book can be created with the k-means algorithm or a different
+ encoding algorithm.
+
+ Parameters
+ ----------
+ obs : ndarray
+ Each row of the 'M' x 'N' array is an observation. The columns are
+ the "features" seen during each observation. The features must be
+ whitened first using the whiten function or something equivalent.
+ code_book : ndarray
+ The code book is usually generated using the k-means algorithm.
+ Each row of the array holds a different code, and the columns are
+ the features of the code.
+
+ >>> # f0 f1 f2 f3
+ >>> code_book = [
+ ... [ 1., 2., 3., 4.], #c0
+ ... [ 1., 2., 3., 4.], #c1
+ ... [ 1., 2., 3., 4.]] #c2
+
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+ Default: True
+
+ Returns
+ -------
+ code : ndarray
+ A length M array holding the code book index for each observation.
+ dist : ndarray
+ The distortion (distance) between the observation and its nearest
+ code.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from scipy.cluster.vq import vq
+ >>> code_book = np.array([[1., 1., 1.],
+ ... [2., 2., 2.]])
+ >>> features = np.array([[1.9, 2.3, 1.7],
+ ... [1.5, 2.5, 2.2],
+ ... [0.8, 0.6, 1.7]])
+ >>> vq(features, code_book)
+ (array([1, 1, 0], dtype=int32), array([0.43588989, 0.73484692, 0.83066239]))
+
+ """
+ xp = array_namespace(obs, code_book)
+ obs = _asarray(obs, xp=xp, check_finite=check_finite)
+ code_book = _asarray(code_book, xp=xp, check_finite=check_finite)
+ ct = xp.result_type(obs, code_book)
+
+ c_obs = xp.astype(obs, ct, copy=False)
+ c_code_book = xp.astype(code_book, ct, copy=False)
+
+ if xp.isdtype(ct, kind='real floating'):
+ c_obs = np.asarray(c_obs)
+ c_code_book = np.asarray(c_code_book)
+ result = _vq.vq(c_obs, c_code_book)
+ return xp.asarray(result[0]), xp.asarray(result[1])
+ return py_vq(obs, code_book, check_finite=False)
+
+
+ def py_vq(obs, code_book, check_finite=True):
+ """ Python version of vq algorithm.
+
+ The algorithm computes the Euclidean distance between each
+ observation and every frame in the code_book.
+
+ Parameters
+ ----------
+ obs : ndarray
+ Expects a rank 2 array. Each row is one observation.
+ code_book : ndarray
+ Code book to use. Same format as obs. Should have the same number
+ of features (i.e., columns) as obs.
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+ Default: True
+
+ Returns
+ -------
+ code : ndarray
+ code[i] gives the label of the ith observation; its code is
+ code_book[code[i]].
+ min_dist : ndarray
+ min_dist[i] gives the distance between the ith observation and its
+ corresponding code.
+
+ Notes
+ -----
+ This function is slower than the C version but works for
+ all input types. If the inputs have the wrong types for the
+ C versions of the function, this one is called as a last resort.
+
+ It is about 20 times slower than the C version.
+
+ """
+ xp = array_namespace(obs, code_book)
+ obs = _asarray(obs, xp=xp, check_finite=check_finite)
+ code_book = _asarray(code_book, xp=xp, check_finite=check_finite)
+
+ if obs.ndim != code_book.ndim:
+ raise ValueError("Observation and code_book should have the same rank")
+
+ if obs.ndim == 1:
+ obs = obs[:, xp.newaxis]
+ code_book = code_book[:, xp.newaxis]
+
+ # Once `cdist` has array API support, this `xp.asarray` call can be removed
+ dist = xp.asarray(cdist(obs, code_book))
+ code = xp.argmin(dist, axis=1)
+ min_dist = xp.min(dist, axis=1)
+ return code, min_dist
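+ # Shape note for py_vq above: with obs of shape (M, N) and code_book of
+ # shape (k, N), cdist returns an (M, k) distance matrix; argmin along
+ # axis 1 gives the M code indices and min along axis 1 the M distances.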
+
+
+ def _kmeans(obs, guess, thresh=1e-5, xp=None):
+ """ "raw" version of k-means.
+
+ Returns
+ -------
+ code_book
+ The lowest distortion codebook found.
+ avg_dist
+ The average distance an observation is from a code in the book.
+ Lower means the code_book matches the data better.
+
+ See Also
+ --------
+ kmeans : wrapper around k-means
+
+ Examples
+ --------
+ Note: not whitened in this example.
+
+ >>> import numpy as np
+ >>> from scipy.cluster.vq import _kmeans
+ >>> features = np.array([[ 1.9,2.3],
+ ... [ 1.5,2.5],
+ ... [ 0.8,0.6],
+ ... [ 0.4,1.8],
+ ... [ 1.0,1.0]])
+ >>> book = np.array((features[0],features[2]))
+ >>> _kmeans(features,book)
+ (array([[ 1.7 , 2.4 ],
+ [ 0.73333333, 1.13333333]]), 0.40563916697728591)
+
+ """
+ xp = np if xp is None else xp
+ code_book = guess
+ diff = xp.inf
+ prev_avg_dists = deque([diff], maxlen=2)
+ while diff > thresh:
+ # compute membership and distances between obs and code_book
+ obs_code, distort = vq(obs, code_book, check_finite=False)
+ prev_avg_dists.append(xp.mean(distort, axis=-1))
+ # recalc code_book as centroids of associated obs
+ obs = np.asarray(obs)
+ obs_code = np.asarray(obs_code)
+ code_book, has_members = _vq.update_cluster_means(obs, obs_code,
+ code_book.shape[0])
+ obs = xp.asarray(obs)
+ obs_code = xp.asarray(obs_code)
+ code_book = xp.asarray(code_book)
+ has_members = xp.asarray(has_members)
+ code_book = code_book[has_members]
+ diff = xp.abs(prev_avg_dists[0] - prev_avg_dists[1])
+
+ return code_book, prev_avg_dists[1]
+
+
+ def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True,
+ *, seed=None):
+ """
+ Performs k-means on a set of observation vectors forming k clusters.
+
+ The k-means algorithm adjusts the classification of the observations
+ into clusters and updates the cluster centroids until the position of
+ the centroids is stable over successive iterations. In this
+ implementation of the algorithm, the stability of the centroids is
+ determined by comparing the absolute value of the change in the average
+ Euclidean distance between the observations and their corresponding
+ centroids against a threshold. This yields
+ a code book mapping centroids to codes and vice versa.
+
+ Parameters
+ ----------
+ obs : ndarray
+ Each row of the M by N array is an observation vector. The
+ columns are the features seen during each observation.
+ The features must be whitened first with the `whiten` function.
+
+ k_or_guess : int or ndarray
+ The number of centroids to generate. A code is assigned to
+ each centroid, which is also the row index of the centroid
+ in the code_book matrix generated.
+
+ The initial k centroids are chosen by randomly selecting
+ observations from the observation matrix. Alternatively,
+ passing a k by N array specifies the initial k centroids.
+
+ iter : int, optional
+ The number of times to run k-means, returning the codebook
+ with the lowest distortion. This argument is ignored if
+ initial centroids are specified with an array for the
+ ``k_or_guess`` parameter. This parameter does not represent the
+ number of iterations of the k-means algorithm.
+
+ thresh : float, optional
+ Terminates the k-means algorithm if the change in
+ distortion since the last k-means iteration is less than
+ or equal to threshold.
+
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+ Default: True
+
+ seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+ Seed for initializing the pseudo-random number generator.
+ If `seed` is None (or `numpy.random`), the `numpy.random.RandomState`
+ singleton is used.
+ If `seed` is an int, a new ``RandomState`` instance is used,
+ seeded with `seed`.
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
+ that instance is used.
+ The default is None.
+
+ Returns
+ -------
+ codebook : ndarray
+ A k by N array of k centroids. The ith centroid
+ codebook[i] is represented with the code i. The centroids
+ and codes generated represent the lowest distortion seen,
+ not necessarily the globally minimal distortion.
+ Note that the number of centroids is not necessarily the same as the
+ ``k_or_guess`` parameter, because centroids assigned to no observations
+ are removed during iterations.
+
+ distortion : float
+ The mean (non-squared) Euclidean distance between the observations
+ passed and the centroids generated. Note the difference to the standard
+ definition of distortion in the context of the k-means algorithm, which
+ is the sum of the squared distances.
+
+ See Also
+ --------
+ kmeans2 : a different implementation of k-means clustering
+ with more methods for generating initial centroids but without
+ using a distortion change threshold as a stopping criterion.
+
+ whiten : must be called prior to passing an observation matrix
+ to kmeans.
+
+ Notes
+ -----
+ For more functionalities or optimal performance, you can use
+ `sklearn.cluster.KMeans <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html>`_.
+ `This <https://hdbscan.readthedocs.io/en/latest/performance_and_scalability.html#comparison-of-high-performance-implementations>`_
+ is a benchmark result of several implementations.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from scipy.cluster.vq import vq, kmeans, whiten
+ >>> import matplotlib.pyplot as plt
+ >>> features = np.array([[ 1.9,2.3],
+ ... [ 1.5,2.5],
+ ... [ 0.8,0.6],
+ ... [ 0.4,1.8],
+ ... [ 0.1,0.1],
+ ... [ 0.2,1.8],
+ ... [ 2.0,0.5],
+ ... [ 0.3,1.5],
+ ... [ 1.0,1.0]])
+ >>> whitened = whiten(features)
+ >>> book = np.array((whitened[0],whitened[2]))
+ >>> kmeans(whitened,book)
+ (array([[ 2.3110306 , 2.86287398], # random
+ [ 0.93218041, 1.24398691]]), 0.85684700941625547)
+
+ >>> codes = 3
+ >>> kmeans(whitened,codes)
+ (array([[ 2.3110306 , 2.86287398], # random
+ [ 1.32544402, 0.65607529],
+ [ 0.40782893, 2.02786907]]), 0.5196582527686241)
+
+ >>> # Create 50 datapoints in two clusters a and b
+ >>> pts = 50
+ >>> rng = np.random.default_rng()
+ >>> a = rng.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts)
+ >>> b = rng.multivariate_normal([30, 10],
+ ... [[10, 2], [2, 1]],
+ ... size=pts)
+ >>> features = np.concatenate((a, b))
+ >>> # Whiten data
+ >>> whitened = whiten(features)
+ >>> # Find 2 clusters in the data
+ >>> codebook, distortion = kmeans(whitened, 2)
+ >>> # Plot whitened data and cluster centers in red
+ >>> plt.scatter(whitened[:, 0], whitened[:, 1])
+ >>> plt.scatter(codebook[:, 0], codebook[:, 1], c='r')
+ >>> plt.show()
+
+ """
+ if isinstance(k_or_guess, int):
+ xp = array_namespace(obs)
+ else:
+ xp = array_namespace(obs, k_or_guess)
+ obs = _asarray(obs, xp=xp, check_finite=check_finite)
+ guess = _asarray(k_or_guess, xp=xp, check_finite=check_finite)
+ if iter < 1:
+ raise ValueError("iter must be at least 1, got %s" % iter)
+
+ # Determine whether a count (scalar) or an initial guess (array) was passed.
+ if size(guess) != 1:
+ if size(guess) < 1:
+ raise ValueError("Asked for 0 clusters. Initial book was %s" %
+ guess)
+ return _kmeans(obs, guess, thresh=thresh, xp=xp)
+
+ # k_or_guess is a scalar, now verify that it's an integer
+ k = int(guess)
+ if k != guess:
+ raise ValueError("If k_or_guess is a scalar, it must be an integer.")
+ if k < 1:
+ raise ValueError("Asked for %d clusters." % k)
+
+ rng = check_random_state(seed)
+
+ # initialize best distance value to a large value
+ best_dist = xp.inf
+ for i in range(iter):
+ # the initial code book is randomly selected from observations
+ guess = _kpoints(obs, k, rng, xp)
+ book, dist = _kmeans(obs, guess, thresh=thresh, xp=xp)
+ if dist < best_dist:
+ best_book = book
+ best_dist = dist
+ return best_book, best_dist
+
+
+ def _kpoints(data, k, rng, xp):
+ """Pick k points at random in data (one row = one observation).
+
+ Parameters
+ ----------
+ data : ndarray
+ Expect a rank 1 or 2 array. Rank 1 is assumed to describe
+ one-dimensional data, rank 2 multidimensional data, in which case
+ one row is one observation.
+ k : int
+ Number of samples to generate.
+ rng : `numpy.random.Generator` or `numpy.random.RandomState`
+ Random number generator.
+
+ Returns
+ -------
+ x : ndarray
+ A 'k' by 'N' array containing the initial centroids
+
+ """
+ idx = rng.choice(data.shape[0], size=int(k), replace=False)
+ # convert to array with default integer dtype (avoids numpy#25607)
+ idx = xp.asarray(idx, dtype=xp.asarray([1]).dtype)
+ return xp.take(data, idx, axis=0)
+
+
+ def _krandinit(data, k, rng, xp):
+ """Returns k samples of a random variable whose parameters depend on data.
+
+ More precisely, it returns k observations sampled from a Gaussian random
+ variable whose mean and covariance are estimated from the data.
+
+ Parameters
+ ----------
+ data : ndarray
+ Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
+ data, rank 2 multidimensional data, in which case one
+ row is one observation.
+ k : int
+ Number of samples to generate.
+ rng : `numpy.random.Generator` or `numpy.random.RandomState`
+ Random number generator.
+
+ Returns
+ -------
+ x : ndarray
+ A 'k' by 'N' array containing the initial centroids
+
+ """
+ mu = xp.mean(data, axis=0)
+ k = np.asarray(k)
+
+ if data.ndim == 1:
+ _cov = cov(data)
+ x = rng.standard_normal(size=k)
+ x = xp.asarray(x)
+ x *= xp.sqrt(_cov)
+ elif data.shape[1] > data.shape[0]:
+ # initialize when the covariance matrix is rank deficient
+ _, s, vh = xp.linalg.svd(data - mu, full_matrices=False)
+ x = rng.standard_normal(size=(k, size(s)))
+ x = xp.asarray(x)
+ sVh = s[:, None] * vh / xp.sqrt(data.shape[0] - xp.asarray(1.))
+ x = x @ sVh
+ else:
+ _cov = atleast_nd(cov(data.T), ndim=2)
+
+ # k rows, d cols (one row = one obs)
+ # Generate k samples of a random variable ~ Gaussian(mu, cov)
+ x = rng.standard_normal(size=(k, size(mu)))
+ x = xp.asarray(x)
+ x = x @ xp.linalg.cholesky(_cov).T
+
+ x += mu
+ return x
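+ # Why the Cholesky transform in _krandinit works: if z has identity
+ # covariance and cov = L @ L.T, then x = z @ L.T has covariance
+ # L @ E[z.T z] @ L.T = L @ L.T = cov, so x + mu is a draw from a
+ # distribution with the sample mean and covariance of the data.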
+
+
+ def _kpp(data, k, rng, xp):
+ """ Picks k points in the data based on the kmeans++ method.
+
+ Parameters
+ ----------
+ data : ndarray
+ Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
+ data, rank 2 multidimensional data, in which case one
+ row is one observation.
+ k : int
+ Number of samples to generate.
+ rng : `numpy.random.Generator` or `numpy.random.RandomState`
+ Random number generator.
+
+ Returns
+ -------
+ init : ndarray
+ A 'k' by 'N' array containing the initial centroids.
+
+ References
+ ----------
+ .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
+ careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
+ on Discrete Algorithms, 2007.
+ """
+
+ ndim = len(data.shape)
+ if ndim == 1:
+ data = data[:, None]
+
+ dims = data.shape[1]
+
+ init = xp.empty((int(k), dims))
+
+ for i in range(k):
+ if i == 0:
+ init[i, :] = data[rng_integers(rng, data.shape[0]), :]
+
+ else:
+ D2 = cdist(init[:i,:], data, metric='sqeuclidean').min(axis=0)
+ probs = D2/D2.sum()
+ cumprobs = probs.cumsum()
+ r = rng.uniform()
+ cumprobs = np.asarray(cumprobs)
+ init[i, :] = data[np.searchsorted(cumprobs, r), :]
+
+ if ndim == 1:
+ init = init[:, 0]
+ return init
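+ # Sampling note for _kpp: D2 holds each point's squared distance to its
+ # nearest already-chosen centroid; drawing r ~ Uniform(0, 1) and taking
+ # np.searchsorted(cumprobs, r) is inverse-CDF sampling, so point j is
+ # selected with probability D2[j] / D2.sum(), the k-means++ rule of [1].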
+
+
+ _valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp}
+
+
+ def _missing_warn():
+ """Print a warning when called."""
+ warnings.warn("One of the clusters is empty. "
+ "Re-run kmeans with a different initialization.",
+ stacklevel=3)
+
+
+ def _missing_raise():
+ """Raise a ClusterError when called."""
+ raise ClusterError("One of the clusters is empty. "
+ "Re-run kmeans with a different initialization.")
+
+
+ _valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
+
+
+ def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
+ missing='warn', check_finite=True, *, seed=None):
+ """
+ Classify a set of observations into k clusters using the k-means algorithm.
+
+ The algorithm attempts to minimize the Euclidean distance between
+ observations and centroids. Several initialization methods are
+ included.
+
+ Parameters
+ ----------
+ data : ndarray
+ A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
+ 'M' array of 'M' 1-D observations.
+ k : int or ndarray
+ The number of clusters to form as well as the number of
+ centroids to generate. If the `minit` initialization string is
+ 'matrix', or if an ndarray is given, it is
+ interpreted as the initial clusters to use.
+ iter : int, optional
+ Number of iterations of the k-means algorithm to run. Note
+ that this differs in meaning from the `iter` parameter of
+ the `kmeans` function.
+ thresh : float, optional
+ (not used yet)
+ minit : str, optional
+ Method for initialization. Available methods are 'random',
+ 'points', '++' and 'matrix':
+
+ 'random': generate k centroids from a Gaussian with mean and
+ variance estimated from the data.
+
+ 'points': choose k observations (rows) at random from data for
+ the initial centroids.
+
+ '++': choose k observations according to the kmeans++ method
+ (careful seeding)
+
+ 'matrix': interpret the k parameter as a k by M (or length k
+ array for 1-D data) array of initial centroids.
+ missing : str, optional
+ Method to deal with empty clusters. Available methods are
+ 'warn' and 'raise':
+
+ 'warn': give a warning and continue.
+
+ 'raise': raise a ClusterError and terminate the algorithm.
+ check_finite : bool, optional
+ Whether to check that the input matrices contain only finite numbers.
+ Disabling may give a performance gain, but may result in problems
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
+ Default: True
+ seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+ Seed for initializing the pseudo-random number generator.
+ If `seed` is None (or `numpy.random`), the `numpy.random.RandomState`
+ singleton is used.
+ If `seed` is an int, a new ``RandomState`` instance is used,
+ seeded with `seed`.
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
+ that instance is used.
+ The default is None.
+
+ Returns
+ -------
+ centroid : ndarray
+ A 'k' by 'N' array of centroids found at the last iteration of
+ k-means.
+ label : ndarray
+ label[i] is the code or index of the centroid the
+ ith observation is closest to.
+
+ See Also
+ --------
+ kmeans
+
+ References
+ ----------
+ .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
+ careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
+ on Discrete Algorithms, 2007.
+
+ Examples
+ --------
+ >>> from scipy.cluster.vq import kmeans2
+ >>> import matplotlib.pyplot as plt
+ >>> import numpy as np
+
+ Create z, an array with shape (100, 2) containing a mixture of samples
+ from three multivariate normal distributions.
+
+ >>> rng = np.random.default_rng()
+ >>> a = rng.multivariate_normal([0, 6], [[2, 1], [1, 1.5]], size=45)
+ >>> b = rng.multivariate_normal([2, 0], [[1, -1], [-1, 3]], size=30)
+ >>> c = rng.multivariate_normal([6, 4], [[5, 0], [0, 1.2]], size=25)
+ >>> z = np.concatenate((a, b, c))
+ >>> rng.shuffle(z)
+
+ Compute three clusters.
+
+ >>> centroid, label = kmeans2(z, 3, minit='points')
+ >>> centroid
+ array([[ 2.22274463, -0.61666946], # may vary
+ [ 0.54069047, 5.86541444],
+ [ 6.73846769, 4.01991898]])
+
+ How many points are in each cluster?
+
+ >>> counts = np.bincount(label)
+ >>> counts
+ array([29, 51, 20]) # may vary
+
+ Plot the clusters.
+
+ >>> w0 = z[label == 0]
+ >>> w1 = z[label == 1]
+ >>> w2 = z[label == 2]
+ >>> plt.plot(w0[:, 0], w0[:, 1], 'o', alpha=0.5, label='cluster 0')
+ >>> plt.plot(w1[:, 0], w1[:, 1], 'd', alpha=0.5, label='cluster 1')
+ >>> plt.plot(w2[:, 0], w2[:, 1], 's', alpha=0.5, label='cluster 2')
+ >>> plt.plot(centroid[:, 0], centroid[:, 1], 'k*', label='centroids')
+ >>> plt.axis('equal')
+ >>> plt.legend(shadow=True)
+ >>> plt.show()
+
+ """
+ if int(iter) < 1:
+ raise ValueError("Invalid iter (%s), "
+ "must be a positive integer." % iter)
+ try:
+ miss_meth = _valid_miss_meth[missing]
+ except KeyError as e:
+ raise ValueError(f"Unknown missing method {missing!r}") from e
+
+ if isinstance(k, int):
+ xp = array_namespace(data)
+ else:
+ xp = array_namespace(data, k)
+ data = _asarray(data, xp=xp, check_finite=check_finite)
+ code_book = copy(k, xp=xp)
+ if data.ndim == 1:
+ d = 1
+ elif data.ndim == 2:
+ d = data.shape[1]
+ else:
+ raise ValueError("Input of rank > 2 is not supported.")
+
+ if size(data) < 1 or size(code_book) < 1:
+ raise ValueError("Empty input is not supported.")
+
+ # If k is not a single value, it should be compatible with data's shape
+ if minit == 'matrix' or size(code_book) > 1:
+ if data.ndim != code_book.ndim:
+ raise ValueError("k array doesn't match data rank")
+ nc = code_book.shape[0]
+ if data.ndim > 1 and code_book.shape[1] != d:
+ raise ValueError("k array doesn't match data dimension")
+ else:
+ nc = int(code_book)
+
+ if nc < 1:
+ raise ValueError("Cannot ask kmeans2 for %d clusters"
+ " (k was %s)" % (nc, code_book))
+ elif nc != code_book:
+ warnings.warn("k was not an integer, was converted.", stacklevel=2)
+
+ try:
+ init_meth = _valid_init_meth[minit]
+ except KeyError as e:
+ raise ValueError(f"Unknown init method {minit!r}") from e
+ else:
+ rng = check_random_state(seed)
+ code_book = init_meth(data, code_book, rng, xp)
+
+ data = np.asarray(data)
+ code_book = np.asarray(code_book)
+ for i in range(iter):
+ # Compute the nearest neighbor for each obs using the current code book
+ label = vq(data, code_book, check_finite=check_finite)[0]
+ # Update the code book by computing centroids
+ new_code_book, has_members = _vq.update_cluster_means(data, label, nc)
+ if not has_members.all():
+ miss_meth()
+ # Set the empty clusters to their previous positions
+ new_code_book[~has_members] = code_book[~has_members]
+ code_book = new_code_book
+
+ return xp.asarray(code_book), xp.asarray(label)
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (9.37 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc ADDED
Binary file (46.8 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_bsr.cpython-310.pyc ADDED
Binary file (22.9 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc ADDED
Binary file (33.4 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_construct.cpython-310.pyc ADDED
Binary file (42.6 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_coo.cpython-310.pyc ADDED
Binary file (27 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csc.cpython-310.pyc ADDED
Binary file (11.1 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc ADDED
Binary file (14.7 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc ADDED
Binary file (15.7 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc ADDED
Binary file (14.5 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dok.cpython-310.pyc ADDED
Binary file (22.8 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc ADDED
Binary file (5.02 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc ADDED
Binary file (11 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc ADDED
Binary file (18.7 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc ADDED
Binary file (4.22 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc ADDED
Binary file (5.36 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc ADDED
Binary file (1.82 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/_sputils.cpython-310.pyc ADDED
Binary file (12.8 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc ADDED
Binary file (786 Bytes).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/bsr.cpython-310.pyc ADDED
Binary file (787 Bytes).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/compressed.cpython-310.pyc ADDED
Binary file (950 Bytes).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/construct.cpython-310.pyc ADDED
Binary file (859 Bytes).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/coo.cpython-310.pyc ADDED
Binary file (814 Bytes).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc ADDED
Binary file (651 Bytes).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/csr.cpython-310.pyc ADDED
Binary file (688 Bytes).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/data.cpython-310.pyc ADDED
Binary file (627 Bytes).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/dia.cpython-310.pyc ADDED
Binary file (707 Bytes).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/dok.cpython-310.pyc ADDED
Binary file (733 Bytes).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc ADDED
Binary file (625 Bytes).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/lil.cpython-310.pyc ADDED
Binary file (740 Bytes).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc ADDED
Binary file (1.78 kB).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc ADDED
Binary file (647 Bytes).
venv/lib/python3.10/site-packages/scipy/sparse/__pycache__/sputils.cpython-310.pyc ADDED
Binary file (905 Bytes).
venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__init__.py ADDED
@@ -0,0 +1,71 @@
+ """
+ Linear Solvers
+ ==============
+
+ The default solver is SuperLU (included in the scipy distribution),
+ which can solve real or complex linear systems in both single and
+ double precisions. It is automatically replaced by UMFPACK, if
+ available. Note that UMFPACK works in double precision only, so
+ switch it off by::
+
+ >>> from scipy.sparse.linalg import spsolve, use_solver
+ >>> use_solver(useUmfpack=False)
+
+ to solve in single precision. See also the use_solver documentation.
+
+ Example session::
+
+ >>> from scipy.sparse import csc_matrix, spdiags
+ >>> from numpy import array
+ >>>
+ >>> print("Inverting a sparse linear system:")
+ >>> print("The sparse matrix (constructed from diagonals):")
+ >>> a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
+ >>> b = array([1, 2, 3, 4, 5])
+ >>> print("Solve: single precision complex:")
+ >>> use_solver( useUmfpack = False )
+ >>> a = a.astype('F')
+ >>> x = spsolve(a, b)
+ >>> print(x)
+ >>> print("Error: ", a@x-b)
+ >>>
+ >>> print("Solve: double precision complex:")
+ >>> use_solver( useUmfpack = True )
+ >>> a = a.astype('D')
+ >>> x = spsolve(a, b)
+ >>> print(x)
+ >>> print("Error: ", a@x-b)
+ >>>
+ >>> print("Solve: double precision:")
+ >>> a = a.astype('d')
+ >>> x = spsolve(a, b)
+ >>> print(x)
+ >>> print("Error: ", a@x-b)
+ >>>
+ >>> print("Solve: single precision:")
+ >>> use_solver( useUmfpack = False )
+ >>> a = a.astype('f')
+ >>> x = spsolve(a, b.astype('f'))
+ >>> print(x)
+ >>> print("Error: ", a@x-b)
+
+ """
+
+ #import umfpack
+ #__doc__ = '\n\n'.join( (__doc__, umfpack.__doc__) )
+ #del umfpack
+
+ from .linsolve import *
+ from ._superlu import SuperLU
+ from . import _add_newdocs
+ from . import linsolve
+
+ __all__ = [
+ 'MatrixRankWarning', 'SuperLU', 'factorized',
+ 'spilu', 'splu', 'spsolve',
+ 'spsolve_triangular', 'use_solver'
+ ]
+
+ from scipy._lib._testutils import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.11 kB).
venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-310.pyc ADDED
Binary file (3.69 kB).
venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/linsolve.cpython-310.pyc ADDED
Binary file (21.4 kB).
venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_add_newdocs.py ADDED
@@ -0,0 +1,153 @@
+ from numpy.lib import add_newdoc
+
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU',
+ """
+ LU factorization of a sparse matrix.
+
+ Factorization is represented as::
+
+ Pr @ A @ Pc = L @ U
+
+ To construct these `SuperLU` objects, call the `splu` and `spilu`
+ functions.
+
+ Attributes
+ ----------
+ shape
+ nnz
+ perm_c
+ perm_r
+ L
+ U
+
+ Methods
+ -------
+ solve
+
+ Notes
+ -----
+
+ .. versionadded:: 0.14.0
+
+ Examples
+ --------
+ The LU decomposition can be used to solve matrix equations. Consider:
+
+ >>> import numpy as np
+ >>> from scipy.sparse import csc_matrix
+ >>> from scipy.sparse.linalg import splu
+ >>> A = csc_matrix([[1,2,0,4], [1,0,0,1], [1,0,2,1], [2,2,1,0.]])
+
+ This can be solved for a given right-hand side:
+
+ >>> lu = splu(A)
+ >>> b = np.array([1, 2, 3, 4])
+ >>> x = lu.solve(b)
+ >>> A.dot(x)
+ array([ 1., 2., 3., 4.])
+
+ The ``lu`` object also contains an explicit representation of the
+ decomposition. The permutations are represented as mappings of
+ indices:
+
+ >>> lu.perm_r
+ array([2, 1, 3, 0], dtype=int32) # may vary
+ >>> lu.perm_c
+ array([0, 1, 3, 2], dtype=int32) # may vary
+
+ The L and U factors are sparse matrices in CSC format:
+
+ >>> lu.L.toarray()
+ array([[ 1. , 0. , 0. , 0. ], # may vary
+ [ 0.5, 1. , 0. , 0. ],
+ [ 0.5, -1. , 1. , 0. ],
+ [ 0.5, 1. , 0. , 1. ]])
+ >>> lu.U.toarray()
+ array([[ 2. , 2. , 0. , 1. ], # may vary
+ [ 0. , -1. , 1. , -0.5],
+ [ 0. , 0. , 5. , -1. ],
+ [ 0. , 0. , 0. , 2. ]])
+
+ The permutation matrices can be constructed:
+
+ >>> Pr = csc_matrix((np.ones(4), (lu.perm_r, np.arange(4))))
+ >>> Pc = csc_matrix((np.ones(4), (np.arange(4), lu.perm_c)))
+
+ We can reassemble the original matrix:
+
+ >>> (Pr.T @ (lu.L @ lu.U) @ Pc.T).toarray()
+ array([[ 1., 2., 0., 4.],
+ [ 1., 0., 0., 1.],
+ [ 1., 0., 2., 1.],
+ [ 2., 2., 1., 0.]])
+ """)
+
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('solve',
+ """
+ solve(rhs[, trans])
+
+ Solves a linear system of equations with one or several right-hand sides.
+
+ Parameters
+ ----------
+ rhs : ndarray, shape (n,) or (n, k)
+ Right hand side(s) of equation
+ trans : {'N', 'T', 'H'}, optional
+ Type of system to solve::
+
+ 'N': A @ x == rhs (default)
+ 'T': A^T @ x == rhs
+ 'H': A^H @ x == rhs
+
+ i.e., normal, transposed, and Hermitian conjugate.
+
+ Returns
+ -------
+ x : ndarray, shape ``rhs.shape``
+ Solution vector(s)
+ """))
+
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('L',
+ """
+ Lower triangular factor with unit diagonal as a
+ `scipy.sparse.csc_matrix`.
+
+ .. versionadded:: 0.14.0
+ """))
+
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('U',
+ """
+ Upper triangular factor as a `scipy.sparse.csc_matrix`.
+
+ .. versionadded:: 0.14.0
+ """))
+
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('shape',
+ """
+ Shape of the original matrix as a tuple of ints.
+ """))
+
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('nnz',
+ """
+ Number of nonzero elements in the matrix.
+ """))
+
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_c',
+ """
+ Permutation Pc represented as an array of indices.
+
+ The column permutation matrix can be reconstructed via:
+
+ >>> Pc = np.zeros((n, n))
+ >>> Pc[np.arange(n), perm_c] = 1
+ """))
+
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_r',
+ """
+ Permutation Pr represented as an array of indices.
+
+ The row permutation matrix can be reconstructed via:
+
+ >>> Pr = np.zeros((n, n))
+ >>> Pr[perm_r, np.arange(n)] = 1
+ """))
venv/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (379 kB).