applied-ai-018 commited on
Commit
e34f5cb
·
verified ·
1 Parent(s): c520ca7

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__init__.py +56 -0
  2. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_affinity_propagation.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_agglomerative.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bicluster.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_birch.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bisect_k_means.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_dbscan.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_feature_agglomeration.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_kmeans.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_mean_shift.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_optics.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_spectral.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_affinity_propagation.py +604 -0
  15. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_agglomerative.py +1336 -0
  16. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_bicluster.py +622 -0
  17. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_birch.py +741 -0
  18. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_bisect_k_means.py +529 -0
  19. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_dbscan.py +476 -0
  20. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_dbscan_inner.cpython-310-x86_64-linux-gnu.so +0 -0
  21. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_feature_agglomeration.py +104 -0
  22. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__init__.py +0 -0
  23. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/__init__.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/hdbscan.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_linkage.cpython-310-x86_64-linux-gnu.so +0 -0
  26. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_reachability.cpython-310-x86_64-linux-gnu.so +0 -0
  27. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.cpython-310-x86_64-linux-gnu.so +0 -0
  28. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.pxd +49 -0
  29. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/hdbscan.py +1018 -0
  30. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__init__.py +0 -0
  31. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/test_reachibility.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/test_reachibility.py +63 -0
  34. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.cpython-310-x86_64-linux-gnu.so +0 -0
  35. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.pxd +9 -0
  36. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.cpython-310-x86_64-linux-gnu.so +0 -0
  37. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.pxd +48 -0
  38. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_elkan.cpython-310-x86_64-linux-gnu.so +0 -0
  39. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_lloyd.cpython-310-x86_64-linux-gnu.so +0 -0
  40. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_minibatch.cpython-310-x86_64-linux-gnu.so +0 -0
  41. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_kmeans.py +2318 -0
  42. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_mean_shift.py +575 -0
  43. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_optics.py +1199 -0
  44. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_spectral.py +799 -0
  45. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__init__.py +0 -0
  46. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/common.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_affinity_propagation.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_birch.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__init__.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
3
+ algorithms.
4
+ """
5
+
6
+ from ._affinity_propagation import AffinityPropagation, affinity_propagation
7
+ from ._agglomerative import (
8
+ AgglomerativeClustering,
9
+ FeatureAgglomeration,
10
+ linkage_tree,
11
+ ward_tree,
12
+ )
13
+ from ._bicluster import SpectralBiclustering, SpectralCoclustering
14
+ from ._birch import Birch
15
+ from ._bisect_k_means import BisectingKMeans
16
+ from ._dbscan import DBSCAN, dbscan
17
+ from ._hdbscan.hdbscan import HDBSCAN
18
+ from ._kmeans import KMeans, MiniBatchKMeans, k_means, kmeans_plusplus
19
+ from ._mean_shift import MeanShift, estimate_bandwidth, get_bin_seeds, mean_shift
20
+ from ._optics import (
21
+ OPTICS,
22
+ cluster_optics_dbscan,
23
+ cluster_optics_xi,
24
+ compute_optics_graph,
25
+ )
26
+ from ._spectral import SpectralClustering, spectral_clustering
27
+
28
+ __all__ = [
29
+ "AffinityPropagation",
30
+ "AgglomerativeClustering",
31
+ "Birch",
32
+ "DBSCAN",
33
+ "OPTICS",
34
+ "cluster_optics_dbscan",
35
+ "cluster_optics_xi",
36
+ "compute_optics_graph",
37
+ "KMeans",
38
+ "BisectingKMeans",
39
+ "FeatureAgglomeration",
40
+ "MeanShift",
41
+ "MiniBatchKMeans",
42
+ "SpectralClustering",
43
+ "affinity_propagation",
44
+ "dbscan",
45
+ "estimate_bandwidth",
46
+ "get_bin_seeds",
47
+ "k_means",
48
+ "kmeans_plusplus",
49
+ "linkage_tree",
50
+ "mean_shift",
51
+ "spectral_clustering",
52
+ "ward_tree",
53
+ "SpectralBiclustering",
54
+ "SpectralCoclustering",
55
+ "HDBSCAN",
56
+ ]
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.39 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_affinity_propagation.cpython-310.pyc ADDED
Binary file (17.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_agglomerative.cpython-310.pyc ADDED
Binary file (37.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bicluster.cpython-310.pyc ADDED
Binary file (19.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_birch.cpython-310.pyc ADDED
Binary file (19.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bisect_k_means.cpython-310.pyc ADDED
Binary file (16 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_dbscan.cpython-310.pyc ADDED
Binary file (17.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_feature_agglomeration.cpython-310.pyc ADDED
Binary file (3.33 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_kmeans.cpython-310.pyc ADDED
Binary file (61.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_mean_shift.cpython-310.pyc ADDED
Binary file (17.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_optics.cpython-310.pyc ADDED
Binary file (35.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_spectral.cpython-310.pyc ADDED
Binary file (26.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_affinity_propagation.py ADDED
@@ -0,0 +1,604 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Affinity Propagation clustering algorithm."""
2
+
3
+ # Author: Alexandre Gramfort [email protected]
4
+ # Gael Varoquaux [email protected]
5
+
6
+ # License: BSD 3 clause
7
+
8
+ import warnings
9
+ from numbers import Integral, Real
10
+
11
+ import numpy as np
12
+
13
+ from .._config import config_context
14
+ from ..base import BaseEstimator, ClusterMixin, _fit_context
15
+ from ..exceptions import ConvergenceWarning
16
+ from ..metrics import euclidean_distances, pairwise_distances_argmin
17
+ from ..utils import check_random_state
18
+ from ..utils._param_validation import Interval, StrOptions, validate_params
19
+ from ..utils.validation import check_is_fitted
20
+
21
+
22
+ def _equal_similarities_and_preferences(S, preference):
23
+ def all_equal_preferences():
24
+ return np.all(preference == preference.flat[0])
25
+
26
+ def all_equal_similarities():
27
+ # Create mask to ignore diagonal of S
28
+ mask = np.ones(S.shape, dtype=bool)
29
+ np.fill_diagonal(mask, 0)
30
+
31
+ return np.all(S[mask].flat == S[mask].flat[0])
32
+
33
+ return all_equal_preferences() and all_equal_similarities()
34
+
35
+
36
+ def _affinity_propagation(
37
+ S,
38
+ *,
39
+ preference,
40
+ convergence_iter,
41
+ max_iter,
42
+ damping,
43
+ verbose,
44
+ return_n_iter,
45
+ random_state,
46
+ ):
47
+ """Main affinity propagation algorithm."""
48
+ n_samples = S.shape[0]
49
+ if n_samples == 1 or _equal_similarities_and_preferences(S, preference):
50
+ # It makes no sense to run the algorithm in this case, so return 1 or
51
+ # n_samples clusters, depending on preferences
52
+ warnings.warn(
53
+ "All samples have mutually equal similarities. "
54
+ "Returning arbitrary cluster center(s)."
55
+ )
56
+ if preference.flat[0] > S.flat[n_samples - 1]:
57
+ return (
58
+ (np.arange(n_samples), np.arange(n_samples), 0)
59
+ if return_n_iter
60
+ else (np.arange(n_samples), np.arange(n_samples))
61
+ )
62
+ else:
63
+ return (
64
+ (np.array([0]), np.array([0] * n_samples), 0)
65
+ if return_n_iter
66
+ else (np.array([0]), np.array([0] * n_samples))
67
+ )
68
+
69
+ # Place preference on the diagonal of S
70
+ S.flat[:: (n_samples + 1)] = preference
71
+
72
+ A = np.zeros((n_samples, n_samples))
73
+ R = np.zeros((n_samples, n_samples)) # Initialize messages
74
+ # Intermediate results
75
+ tmp = np.zeros((n_samples, n_samples))
76
+
77
+ # Remove degeneracies
78
+ S += (
79
+ np.finfo(S.dtype).eps * S + np.finfo(S.dtype).tiny * 100
80
+ ) * random_state.standard_normal(size=(n_samples, n_samples))
81
+
82
+ # Execute parallel affinity propagation updates
83
+ e = np.zeros((n_samples, convergence_iter))
84
+
85
+ ind = np.arange(n_samples)
86
+
87
+ for it in range(max_iter):
88
+ # tmp = A + S; compute responsibilities
89
+ np.add(A, S, tmp)
90
+ I = np.argmax(tmp, axis=1)
91
+ Y = tmp[ind, I] # np.max(A + S, axis=1)
92
+ tmp[ind, I] = -np.inf
93
+ Y2 = np.max(tmp, axis=1)
94
+
95
+ # tmp = Rnew
96
+ np.subtract(S, Y[:, None], tmp)
97
+ tmp[ind, I] = S[ind, I] - Y2
98
+
99
+ # Damping
100
+ tmp *= 1 - damping
101
+ R *= damping
102
+ R += tmp
103
+
104
+ # tmp = Rp; compute availabilities
105
+ np.maximum(R, 0, tmp)
106
+ tmp.flat[:: n_samples + 1] = R.flat[:: n_samples + 1]
107
+
108
+ # tmp = -Anew
109
+ tmp -= np.sum(tmp, axis=0)
110
+ dA = np.diag(tmp).copy()
111
+ tmp.clip(0, np.inf, tmp)
112
+ tmp.flat[:: n_samples + 1] = dA
113
+
114
+ # Damping
115
+ tmp *= 1 - damping
116
+ A *= damping
117
+ A -= tmp
118
+
119
+ # Check for convergence
120
+ E = (np.diag(A) + np.diag(R)) > 0
121
+ e[:, it % convergence_iter] = E
122
+ K = np.sum(E, axis=0)
123
+
124
+ if it >= convergence_iter:
125
+ se = np.sum(e, axis=1)
126
+ unconverged = np.sum((se == convergence_iter) + (se == 0)) != n_samples
127
+ if (not unconverged and (K > 0)) or (it == max_iter):
128
+ never_converged = False
129
+ if verbose:
130
+ print("Converged after %d iterations." % it)
131
+ break
132
+ else:
133
+ never_converged = True
134
+ if verbose:
135
+ print("Did not converge")
136
+
137
+ I = np.flatnonzero(E)
138
+ K = I.size # Identify exemplars
139
+
140
+ if K > 0:
141
+ if never_converged:
142
+ warnings.warn(
143
+ (
144
+ "Affinity propagation did not converge, this model "
145
+ "may return degenerate cluster centers and labels."
146
+ ),
147
+ ConvergenceWarning,
148
+ )
149
+ c = np.argmax(S[:, I], axis=1)
150
+ c[I] = np.arange(K) # Identify clusters
151
+ # Refine the final set of exemplars and clusters and return results
152
+ for k in range(K):
153
+ ii = np.where(c == k)[0]
154
+ j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
155
+ I[k] = ii[j]
156
+
157
+ c = np.argmax(S[:, I], axis=1)
158
+ c[I] = np.arange(K)
159
+ labels = I[c]
160
+ # Reduce labels to a sorted, gapless, list
161
+ cluster_centers_indices = np.unique(labels)
162
+ labels = np.searchsorted(cluster_centers_indices, labels)
163
+ else:
164
+ warnings.warn(
165
+ (
166
+ "Affinity propagation did not converge and this model "
167
+ "will not have any cluster centers."
168
+ ),
169
+ ConvergenceWarning,
170
+ )
171
+ labels = np.array([-1] * n_samples)
172
+ cluster_centers_indices = []
173
+
174
+ if return_n_iter:
175
+ return cluster_centers_indices, labels, it + 1
176
+ else:
177
+ return cluster_centers_indices, labels
178
+
179
+
180
+ ###############################################################################
181
+ # Public API
182
+
183
+
184
+ @validate_params(
185
+ {
186
+ "S": ["array-like"],
187
+ "return_n_iter": ["boolean"],
188
+ },
189
+ prefer_skip_nested_validation=False,
190
+ )
191
+ def affinity_propagation(
192
+ S,
193
+ *,
194
+ preference=None,
195
+ convergence_iter=15,
196
+ max_iter=200,
197
+ damping=0.5,
198
+ copy=True,
199
+ verbose=False,
200
+ return_n_iter=False,
201
+ random_state=None,
202
+ ):
203
+ """Perform Affinity Propagation Clustering of data.
204
+
205
+ Read more in the :ref:`User Guide <affinity_propagation>`.
206
+
207
+ Parameters
208
+ ----------
209
+ S : array-like of shape (n_samples, n_samples)
210
+ Matrix of similarities between points.
211
+
212
+ preference : array-like of shape (n_samples,) or float, default=None
213
+ Preferences for each point - points with larger values of
214
+ preferences are more likely to be chosen as exemplars. The number of
215
+ exemplars, i.e. of clusters, is influenced by the input preferences
216
+ value. If the preferences are not passed as arguments, they will be
217
+ set to the median of the input similarities (resulting in a moderate
218
+ number of clusters). For a smaller amount of clusters, this can be set
219
+ to the minimum value of the similarities.
220
+
221
+ convergence_iter : int, default=15
222
+ Number of iterations with no change in the number
223
+ of estimated clusters that stops the convergence.
224
+
225
+ max_iter : int, default=200
226
+ Maximum number of iterations.
227
+
228
+ damping : float, default=0.5
229
+ Damping factor between 0.5 and 1.
230
+
231
+ copy : bool, default=True
232
+ If copy is False, the affinity matrix is modified inplace by the
233
+ algorithm, for memory efficiency.
234
+
235
+ verbose : bool, default=False
236
+ The verbosity level.
237
+
238
+ return_n_iter : bool, default=False
239
+ Whether or not to return the number of iterations.
240
+
241
+ random_state : int, RandomState instance or None, default=None
242
+ Pseudo-random number generator to control the starting state.
243
+ Use an int for reproducible results across function calls.
244
+ See the :term:`Glossary <random_state>`.
245
+
246
+ .. versionadded:: 0.23
247
+ this parameter was previously hardcoded as 0.
248
+
249
+ Returns
250
+ -------
251
+ cluster_centers_indices : ndarray of shape (n_clusters,)
252
+ Index of clusters centers.
253
+
254
+ labels : ndarray of shape (n_samples,)
255
+ Cluster labels for each point.
256
+
257
+ n_iter : int
258
+ Number of iterations run. Returned only if `return_n_iter` is
259
+ set to True.
260
+
261
+ Notes
262
+ -----
263
+ For an example, see :ref:`examples/cluster/plot_affinity_propagation.py
264
+ <sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`.
265
+
266
+ When the algorithm does not converge, it will still return a arrays of
267
+ ``cluster_center_indices`` and labels if there are any exemplars/clusters,
268
+ however they may be degenerate and should be used with caution.
269
+
270
+ When all training samples have equal similarities and equal preferences,
271
+ the assignment of cluster centers and labels depends on the preference.
272
+ If the preference is smaller than the similarities, a single cluster center
273
+ and label ``0`` for every sample will be returned. Otherwise, every
274
+ training sample becomes its own cluster center and is assigned a unique
275
+ label.
276
+
277
+ References
278
+ ----------
279
+ Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
280
+ Between Data Points", Science Feb. 2007
281
+
282
+ Examples
283
+ --------
284
+ >>> import numpy as np
285
+ >>> from sklearn.cluster import affinity_propagation
286
+ >>> from sklearn.metrics.pairwise import euclidean_distances
287
+ >>> X = np.array([[1, 2], [1, 4], [1, 0],
288
+ ... [4, 2], [4, 4], [4, 0]])
289
+ >>> S = -euclidean_distances(X, squared=True)
290
+ >>> cluster_centers_indices, labels = affinity_propagation(S, random_state=0)
291
+ >>> cluster_centers_indices
292
+ array([0, 3])
293
+ >>> labels
294
+ array([0, 0, 0, 1, 1, 1])
295
+ """
296
+ estimator = AffinityPropagation(
297
+ damping=damping,
298
+ max_iter=max_iter,
299
+ convergence_iter=convergence_iter,
300
+ copy=copy,
301
+ preference=preference,
302
+ affinity="precomputed",
303
+ verbose=verbose,
304
+ random_state=random_state,
305
+ ).fit(S)
306
+
307
+ if return_n_iter:
308
+ return estimator.cluster_centers_indices_, estimator.labels_, estimator.n_iter_
309
+ return estimator.cluster_centers_indices_, estimator.labels_
310
+
311
+
312
+ class AffinityPropagation(ClusterMixin, BaseEstimator):
313
+ """Perform Affinity Propagation Clustering of data.
314
+
315
+ Read more in the :ref:`User Guide <affinity_propagation>`.
316
+
317
+ Parameters
318
+ ----------
319
+ damping : float, default=0.5
320
+ Damping factor in the range `[0.5, 1.0)` is the extent to
321
+ which the current value is maintained relative to
322
+ incoming values (weighted 1 - damping). This in order
323
+ to avoid numerical oscillations when updating these
324
+ values (messages).
325
+
326
+ max_iter : int, default=200
327
+ Maximum number of iterations.
328
+
329
+ convergence_iter : int, default=15
330
+ Number of iterations with no change in the number
331
+ of estimated clusters that stops the convergence.
332
+
333
+ copy : bool, default=True
334
+ Make a copy of input data.
335
+
336
+ preference : array-like of shape (n_samples,) or float, default=None
337
+ Preferences for each point - points with larger values of
338
+ preferences are more likely to be chosen as exemplars. The number
339
+ of exemplars, ie of clusters, is influenced by the input
340
+ preferences value. If the preferences are not passed as arguments,
341
+ they will be set to the median of the input similarities.
342
+
343
+ affinity : {'euclidean', 'precomputed'}, default='euclidean'
344
+ Which affinity to use. At the moment 'precomputed' and
345
+ ``euclidean`` are supported. 'euclidean' uses the
346
+ negative squared euclidean distance between points.
347
+
348
+ verbose : bool, default=False
349
+ Whether to be verbose.
350
+
351
+ random_state : int, RandomState instance or None, default=None
352
+ Pseudo-random number generator to control the starting state.
353
+ Use an int for reproducible results across function calls.
354
+ See the :term:`Glossary <random_state>`.
355
+
356
+ .. versionadded:: 0.23
357
+ this parameter was previously hardcoded as 0.
358
+
359
+ Attributes
360
+ ----------
361
+ cluster_centers_indices_ : ndarray of shape (n_clusters,)
362
+ Indices of cluster centers.
363
+
364
+ cluster_centers_ : ndarray of shape (n_clusters, n_features)
365
+ Cluster centers (if affinity != ``precomputed``).
366
+
367
+ labels_ : ndarray of shape (n_samples,)
368
+ Labels of each point.
369
+
370
+ affinity_matrix_ : ndarray of shape (n_samples, n_samples)
371
+ Stores the affinity matrix used in ``fit``.
372
+
373
+ n_iter_ : int
374
+ Number of iterations taken to converge.
375
+
376
+ n_features_in_ : int
377
+ Number of features seen during :term:`fit`.
378
+
379
+ .. versionadded:: 0.24
380
+
381
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
382
+ Names of features seen during :term:`fit`. Defined only when `X`
383
+ has feature names that are all strings.
384
+
385
+ .. versionadded:: 1.0
386
+
387
+ See Also
388
+ --------
389
+ AgglomerativeClustering : Recursively merges the pair of
390
+ clusters that minimally increases a given linkage distance.
391
+ FeatureAgglomeration : Similar to AgglomerativeClustering,
392
+ but recursively merges features instead of samples.
393
+ KMeans : K-Means clustering.
394
+ MiniBatchKMeans : Mini-Batch K-Means clustering.
395
+ MeanShift : Mean shift clustering using a flat kernel.
396
+ SpectralClustering : Apply clustering to a projection
397
+ of the normalized Laplacian.
398
+
399
+ Notes
400
+ -----
401
+ For an example, see :ref:`examples/cluster/plot_affinity_propagation.py
402
+ <sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`.
403
+
404
+ The algorithmic complexity of affinity propagation is quadratic
405
+ in the number of points.
406
+
407
+ When the algorithm does not converge, it will still return a arrays of
408
+ ``cluster_center_indices`` and labels if there are any exemplars/clusters,
409
+ however they may be degenerate and should be used with caution.
410
+
411
+ When ``fit`` does not converge, ``cluster_centers_`` is still populated
412
+ however it may be degenerate. In such a case, proceed with caution.
413
+ If ``fit`` does not converge and fails to produce any ``cluster_centers_``
414
+ then ``predict`` will label every sample as ``-1``.
415
+
416
+ When all training samples have equal similarities and equal preferences,
417
+ the assignment of cluster centers and labels depends on the preference.
418
+ If the preference is smaller than the similarities, ``fit`` will result in
419
+ a single cluster center and label ``0`` for every sample. Otherwise, every
420
+ training sample becomes its own cluster center and is assigned a unique
421
+ label.
422
+
423
+ References
424
+ ----------
425
+
426
+ Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
427
+ Between Data Points", Science Feb. 2007
428
+
429
+ Examples
430
+ --------
431
+ >>> from sklearn.cluster import AffinityPropagation
432
+ >>> import numpy as np
433
+ >>> X = np.array([[1, 2], [1, 4], [1, 0],
434
+ ... [4, 2], [4, 4], [4, 0]])
435
+ >>> clustering = AffinityPropagation(random_state=5).fit(X)
436
+ >>> clustering
437
+ AffinityPropagation(random_state=5)
438
+ >>> clustering.labels_
439
+ array([0, 0, 0, 1, 1, 1])
440
+ >>> clustering.predict([[0, 0], [4, 4]])
441
+ array([0, 1])
442
+ >>> clustering.cluster_centers_
443
+ array([[1, 2],
444
+ [4, 2]])
445
+ """
446
+
447
+ _parameter_constraints: dict = {
448
+ "damping": [Interval(Real, 0.5, 1.0, closed="left")],
449
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
450
+ "convergence_iter": [Interval(Integral, 1, None, closed="left")],
451
+ "copy": ["boolean"],
452
+ "preference": [
453
+ "array-like",
454
+ Interval(Real, None, None, closed="neither"),
455
+ None,
456
+ ],
457
+ "affinity": [StrOptions({"euclidean", "precomputed"})],
458
+ "verbose": ["verbose"],
459
+ "random_state": ["random_state"],
460
+ }
461
+
462
+ def __init__(
463
+ self,
464
+ *,
465
+ damping=0.5,
466
+ max_iter=200,
467
+ convergence_iter=15,
468
+ copy=True,
469
+ preference=None,
470
+ affinity="euclidean",
471
+ verbose=False,
472
+ random_state=None,
473
+ ):
474
+ self.damping = damping
475
+ self.max_iter = max_iter
476
+ self.convergence_iter = convergence_iter
477
+ self.copy = copy
478
+ self.verbose = verbose
479
+ self.preference = preference
480
+ self.affinity = affinity
481
+ self.random_state = random_state
482
+
483
+ def _more_tags(self):
484
+ return {"pairwise": self.affinity == "precomputed"}
485
+
486
+ @_fit_context(prefer_skip_nested_validation=True)
487
+ def fit(self, X, y=None):
488
+ """Fit the clustering from features, or affinity matrix.
489
+
490
+ Parameters
491
+ ----------
492
+ X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
493
+ array-like of shape (n_samples, n_samples)
494
+ Training instances to cluster, or similarities / affinities between
495
+ instances if ``affinity='precomputed'``. If a sparse feature matrix
496
+ is provided, it will be converted into a sparse ``csr_matrix``.
497
+
498
+ y : Ignored
499
+ Not used, present here for API consistency by convention.
500
+
501
+ Returns
502
+ -------
503
+ self
504
+ Returns the instance itself.
505
+ """
506
+ if self.affinity == "precomputed":
507
+ accept_sparse = False
508
+ else:
509
+ accept_sparse = "csr"
510
+ X = self._validate_data(X, accept_sparse=accept_sparse)
511
+ if self.affinity == "precomputed":
512
+ self.affinity_matrix_ = X.copy() if self.copy else X
513
+ else: # self.affinity == "euclidean"
514
+ self.affinity_matrix_ = -euclidean_distances(X, squared=True)
515
+
516
+ if self.affinity_matrix_.shape[0] != self.affinity_matrix_.shape[1]:
517
+ raise ValueError(
518
+ "The matrix of similarities must be a square array. "
519
+ f"Got {self.affinity_matrix_.shape} instead."
520
+ )
521
+
522
+ if self.preference is None:
523
+ preference = np.median(self.affinity_matrix_)
524
+ else:
525
+ preference = self.preference
526
+ preference = np.asarray(preference)
527
+
528
+ random_state = check_random_state(self.random_state)
529
+
530
+ (
531
+ self.cluster_centers_indices_,
532
+ self.labels_,
533
+ self.n_iter_,
534
+ ) = _affinity_propagation(
535
+ self.affinity_matrix_,
536
+ max_iter=self.max_iter,
537
+ convergence_iter=self.convergence_iter,
538
+ preference=preference,
539
+ damping=self.damping,
540
+ verbose=self.verbose,
541
+ return_n_iter=True,
542
+ random_state=random_state,
543
+ )
544
+
545
+ if self.affinity != "precomputed":
546
+ self.cluster_centers_ = X[self.cluster_centers_indices_].copy()
547
+
548
+ return self
549
+
550
+ def predict(self, X):
551
+ """Predict the closest cluster each sample in X belongs to.
552
+
553
+ Parameters
554
+ ----------
555
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
556
+ New data to predict. If a sparse matrix is provided, it will be
557
+ converted into a sparse ``csr_matrix``.
558
+
559
+ Returns
560
+ -------
561
+ labels : ndarray of shape (n_samples,)
562
+ Cluster labels.
563
+ """
564
+ check_is_fitted(self)
565
+ X = self._validate_data(X, reset=False, accept_sparse="csr")
566
+ if not hasattr(self, "cluster_centers_"):
567
+ raise ValueError(
568
+ "Predict method is not supported when affinity='precomputed'."
569
+ )
570
+
571
+ if self.cluster_centers_.shape[0] > 0:
572
+ with config_context(assume_finite=True):
573
+ return pairwise_distances_argmin(X, self.cluster_centers_)
574
+ else:
575
+ warnings.warn(
576
+ (
577
+ "This model does not have any cluster centers "
578
+ "because affinity propagation did not converge. "
579
+ "Labeling every sample as '-1'."
580
+ ),
581
+ ConvergenceWarning,
582
+ )
583
+ return np.array([-1] * X.shape[0])
584
+
585
+ def fit_predict(self, X, y=None):
586
+ """Fit clustering from features/affinity matrix; return cluster labels.
587
+
588
+ Parameters
589
+ ----------
590
+ X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
591
+ array-like of shape (n_samples, n_samples)
592
+ Training instances to cluster, or similarities / affinities between
593
+ instances if ``affinity='precomputed'``. If a sparse feature matrix
594
+ is provided, it will be converted into a sparse ``csr_matrix``.
595
+
596
+ y : Ignored
597
+ Not used, present here for API consistency by convention.
598
+
599
+ Returns
600
+ -------
601
+ labels : ndarray of shape (n_samples,)
602
+ Cluster labels.
603
+ """
604
+ return super().fit_predict(X, y)
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_agglomerative.py ADDED
@@ -0,0 +1,1336 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Hierarchical Agglomerative Clustering
2
+
3
+ These routines perform some hierarchical agglomerative clustering of some
4
+ input data.
5
+
6
+ Authors : Vincent Michel, Bertrand Thirion, Alexandre Gramfort,
7
+ Gael Varoquaux
8
+ License: BSD 3 clause
9
+ """
10
+ import warnings
11
+ from heapq import heapify, heappop, heappush, heappushpop
12
+ from numbers import Integral, Real
13
+
14
+ import numpy as np
15
+ from scipy import sparse
16
+ from scipy.sparse.csgraph import connected_components
17
+
18
+ from ..base import (
19
+ BaseEstimator,
20
+ ClassNamePrefixFeaturesOutMixin,
21
+ ClusterMixin,
22
+ _fit_context,
23
+ )
24
+ from ..metrics import DistanceMetric
25
+ from ..metrics._dist_metrics import METRIC_MAPPING64
26
+ from ..metrics.pairwise import _VALID_METRICS, paired_distances
27
+ from ..utils import check_array
28
+ from ..utils._fast_dict import IntFloatDict
29
+ from ..utils._param_validation import (
30
+ HasMethods,
31
+ Hidden,
32
+ Interval,
33
+ StrOptions,
34
+ validate_params,
35
+ )
36
+ from ..utils.graph import _fix_connected_components
37
+ from ..utils.validation import check_memory
38
+
39
+ # mypy error: Module 'sklearn.cluster' has no attribute '_hierarchical_fast'
40
+ from . import _hierarchical_fast as _hierarchical # type: ignore
41
+ from ._feature_agglomeration import AgglomerationTransform
42
+
43
+ ###############################################################################
44
+ # For non fully-connected graphs
45
+
46
+
47
+ def _fix_connectivity(X, connectivity, affinity):
48
+ """
49
+ Fixes the connectivity matrix.
50
+
51
+ The different steps are:
52
+
53
+ - copies it
54
+ - makes it symmetric
55
+ - converts it to LIL if necessary
56
+ - completes it if necessary.
57
+
58
+ Parameters
59
+ ----------
60
+ X : array-like of shape (n_samples, n_features)
61
+ Feature matrix representing `n_samples` samples to be clustered.
62
+
63
+ connectivity : sparse matrix, default=None
64
+ Connectivity matrix. Defines for each sample the neighboring samples
65
+ following a given structure of the data. The matrix is assumed to
66
+ be symmetric and only the upper triangular half is used.
67
+ Default is `None`, i.e, the Ward algorithm is unstructured.
68
+
69
+ affinity : {"euclidean", "precomputed"}, default="euclidean"
70
+ Which affinity to use. At the moment `precomputed` and
71
+ ``euclidean`` are supported. `euclidean` uses the
72
+ negative squared Euclidean distance between points.
73
+
74
+ Returns
75
+ -------
76
+ connectivity : sparse matrix
77
+ The fixed connectivity matrix.
78
+
79
+ n_connected_components : int
80
+ The number of connected components in the graph.
81
+ """
82
+ n_samples = X.shape[0]
83
+ if connectivity.shape[0] != n_samples or connectivity.shape[1] != n_samples:
84
+ raise ValueError(
85
+ "Wrong shape for connectivity matrix: %s when X is %s"
86
+ % (connectivity.shape, X.shape)
87
+ )
88
+
89
+ # Make the connectivity matrix symmetric:
90
+ connectivity = connectivity + connectivity.T
91
+
92
+ # Convert connectivity matrix to LIL
93
+ if not sparse.issparse(connectivity):
94
+ connectivity = sparse.lil_matrix(connectivity)
95
+
96
+ # `connectivity` is a sparse matrix at this point
97
+ if connectivity.format != "lil":
98
+ connectivity = connectivity.tolil()
99
+
100
+ # Compute the number of nodes
101
+ n_connected_components, labels = connected_components(connectivity)
102
+
103
+ if n_connected_components > 1:
104
+ warnings.warn(
105
+ "the number of connected components of the "
106
+ "connectivity matrix is %d > 1. Completing it to avoid "
107
+ "stopping the tree early." % n_connected_components,
108
+ stacklevel=2,
109
+ )
110
+ # XXX: Can we do without completing the matrix?
111
+ connectivity = _fix_connected_components(
112
+ X=X,
113
+ graph=connectivity,
114
+ n_connected_components=n_connected_components,
115
+ component_labels=labels,
116
+ metric=affinity,
117
+ mode="connectivity",
118
+ )
119
+
120
+ return connectivity, n_connected_components
121
+
122
+
123
+ def _single_linkage_tree(
124
+ connectivity,
125
+ n_samples,
126
+ n_nodes,
127
+ n_clusters,
128
+ n_connected_components,
129
+ return_distance,
130
+ ):
131
+ """
132
+ Perform single linkage clustering on sparse data via the minimum
133
+ spanning tree from scipy.sparse.csgraph, then using union-find to label.
134
+ The parent array is then generated by walking through the tree.
135
+ """
136
+ from scipy.sparse.csgraph import minimum_spanning_tree
137
+
138
+ # explicitly cast connectivity to ensure safety
139
+ connectivity = connectivity.astype(np.float64, copy=False)
140
+
141
+ # Ensure zero distances aren't ignored by setting them to "epsilon"
142
+ epsilon_value = np.finfo(dtype=connectivity.data.dtype).eps
143
+ connectivity.data[connectivity.data == 0] = epsilon_value
144
+
145
+ # Use scipy.sparse.csgraph to generate a minimum spanning tree
146
+ mst = minimum_spanning_tree(connectivity.tocsr())
147
+
148
+ # Convert the graph to scipy.cluster.hierarchy array format
149
+ mst = mst.tocoo()
150
+
151
+ # Undo the epsilon values
152
+ mst.data[mst.data == epsilon_value] = 0
153
+
154
+ mst_array = np.vstack([mst.row, mst.col, mst.data]).T
155
+
156
+ # Sort edges of the min_spanning_tree by weight
157
+ mst_array = mst_array[np.argsort(mst_array.T[2], kind="mergesort"), :]
158
+
159
+ # Convert edge list into standard hierarchical clustering format
160
+ single_linkage_tree = _hierarchical._single_linkage_label(mst_array)
161
+ children_ = single_linkage_tree[:, :2].astype(int)
162
+
163
+ # Compute parents
164
+ parent = np.arange(n_nodes, dtype=np.intp)
165
+ for i, (left, right) in enumerate(children_, n_samples):
166
+ if n_clusters is not None and i >= n_nodes:
167
+ break
168
+ if left < n_nodes:
169
+ parent[left] = i
170
+ if right < n_nodes:
171
+ parent[right] = i
172
+
173
+ if return_distance:
174
+ distances = single_linkage_tree[:, 2]
175
+ return children_, n_connected_components, n_samples, parent, distances
176
+ return children_, n_connected_components, n_samples, parent
177
+
178
+
179
+ ###############################################################################
180
+ # Hierarchical tree building functions
181
+
182
+
183
+ @validate_params(
184
+ {
185
+ "X": ["array-like"],
186
+ "connectivity": ["array-like", "sparse matrix", None],
187
+ "n_clusters": [Interval(Integral, 1, None, closed="left"), None],
188
+ "return_distance": ["boolean"],
189
+ },
190
+ prefer_skip_nested_validation=True,
191
+ )
192
+ def ward_tree(X, *, connectivity=None, n_clusters=None, return_distance=False):
193
+ """Ward clustering based on a Feature matrix.
194
+
195
+ Recursively merges the pair of clusters that minimally increases
196
+ within-cluster variance.
197
+
198
+ The inertia matrix uses a Heapq-based representation.
199
+
200
+ This is the structured version, that takes into account some topological
201
+ structure between samples.
202
+
203
+ Read more in the :ref:`User Guide <hierarchical_clustering>`.
204
+
205
+ Parameters
206
+ ----------
207
+ X : array-like of shape (n_samples, n_features)
208
+ Feature matrix representing `n_samples` samples to be clustered.
209
+
210
+ connectivity : {array-like, sparse matrix}, default=None
211
+ Connectivity matrix. Defines for each sample the neighboring samples
212
+ following a given structure of the data. The matrix is assumed to
213
+ be symmetric and only the upper triangular half is used.
214
+ Default is None, i.e, the Ward algorithm is unstructured.
215
+
216
+ n_clusters : int, default=None
217
+ `n_clusters` should be less than `n_samples`. Stop early the
218
+ construction of the tree at `n_clusters.` This is useful to decrease
219
+ computation time if the number of clusters is not small compared to the
220
+ number of samples. In this case, the complete tree is not computed, thus
221
+ the 'children' output is of limited use, and the 'parents' output should
222
+ rather be used. This option is valid only when specifying a connectivity
223
+ matrix.
224
+
225
+ return_distance : bool, default=False
226
+ If `True`, return the distance between the clusters.
227
+
228
+ Returns
229
+ -------
230
+ children : ndarray of shape (n_nodes-1, 2)
231
+ The children of each non-leaf node. Values less than `n_samples`
232
+ correspond to leaves of the tree which are the original samples.
233
+ A node `i` greater than or equal to `n_samples` is a non-leaf
234
+ node and has children `children_[i - n_samples]`. Alternatively
235
+ at the i-th iteration, children[i][0] and children[i][1]
236
+ are merged to form node `n_samples + i`.
237
+
238
+ n_connected_components : int
239
+ The number of connected components in the graph.
240
+
241
+ n_leaves : int
242
+ The number of leaves in the tree.
243
+
244
+ parents : ndarray of shape (n_nodes,) or None
245
+ The parent of each node. Only returned when a connectivity matrix
246
+ is specified, elsewhere 'None' is returned.
247
+
248
+ distances : ndarray of shape (n_nodes-1,)
249
+ Only returned if `return_distance` is set to `True` (for compatibility).
250
+ The distances between the centers of the nodes. `distances[i]`
251
+ corresponds to a weighted Euclidean distance between
252
+ the nodes `children[i, 1]` and `children[i, 2]`. If the nodes refer to
253
+ leaves of the tree, then `distances[i]` is their unweighted Euclidean
254
+ distance. Distances are updated in the following way
255
+ (from scipy.hierarchy.linkage):
256
+
257
+ The new entry :math:`d(u,v)` is computed as follows,
258
+
259
+ .. math::
260
+
261
+ d(u,v) = \\sqrt{\\frac{|v|+|s|}
262
+ {T}d(v,s)^2
263
+ + \\frac{|v|+|t|}
264
+ {T}d(v,t)^2
265
+ - \\frac{|v|}
266
+ {T}d(s,t)^2}
267
+
268
+ where :math:`u` is the newly joined cluster consisting of
269
+ clusters :math:`s` and :math:`t`, :math:`v` is an unused
270
+ cluster in the forest, :math:`T=|v|+|s|+|t|`, and
271
+ :math:`|*|` is the cardinality of its argument. This is also
272
+ known as the incremental algorithm.
273
+
274
+ Examples
275
+ --------
276
+ >>> import numpy as np
277
+ >>> from sklearn.cluster import ward_tree
278
+ >>> X = np.array([[1, 2], [1, 4], [1, 0],
279
+ ... [4, 2], [4, 4], [4, 0]])
280
+ >>> children, n_connected_components, n_leaves, parents = ward_tree(X)
281
+ >>> children
282
+ array([[0, 1],
283
+ [3, 5],
284
+ [2, 6],
285
+ [4, 7],
286
+ [8, 9]])
287
+ >>> n_connected_components
288
+ 1
289
+ >>> n_leaves
290
+ 6
291
+ """
292
+ X = np.asarray(X)
293
+ if X.ndim == 1:
294
+ X = np.reshape(X, (-1, 1))
295
+ n_samples, n_features = X.shape
296
+
297
+ if connectivity is None:
298
+ from scipy.cluster import hierarchy # imports PIL
299
+
300
+ if n_clusters is not None:
301
+ warnings.warn(
302
+ (
303
+ "Partial build of the tree is implemented "
304
+ "only for structured clustering (i.e. with "
305
+ "explicit connectivity). The algorithm "
306
+ "will build the full tree and only "
307
+ "retain the lower branches required "
308
+ "for the specified number of clusters"
309
+ ),
310
+ stacklevel=2,
311
+ )
312
+ X = np.require(X, requirements="W")
313
+ out = hierarchy.ward(X)
314
+ children_ = out[:, :2].astype(np.intp)
315
+
316
+ if return_distance:
317
+ distances = out[:, 2]
318
+ return children_, 1, n_samples, None, distances
319
+ else:
320
+ return children_, 1, n_samples, None
321
+
322
+ connectivity, n_connected_components = _fix_connectivity(
323
+ X, connectivity, affinity="euclidean"
324
+ )
325
+ if n_clusters is None:
326
+ n_nodes = 2 * n_samples - 1
327
+ else:
328
+ if n_clusters > n_samples:
329
+ raise ValueError(
330
+ "Cannot provide more clusters than samples. "
331
+ "%i n_clusters was asked, and there are %i "
332
+ "samples." % (n_clusters, n_samples)
333
+ )
334
+ n_nodes = 2 * n_samples - n_clusters
335
+
336
+ # create inertia matrix
337
+ coord_row = []
338
+ coord_col = []
339
+ A = []
340
+ for ind, row in enumerate(connectivity.rows):
341
+ A.append(row)
342
+ # We keep only the upper triangular for the moments
343
+ # Generator expressions are faster than arrays on the following
344
+ row = [i for i in row if i < ind]
345
+ coord_row.extend(
346
+ len(row)
347
+ * [
348
+ ind,
349
+ ]
350
+ )
351
+ coord_col.extend(row)
352
+
353
+ coord_row = np.array(coord_row, dtype=np.intp, order="C")
354
+ coord_col = np.array(coord_col, dtype=np.intp, order="C")
355
+
356
+ # build moments as a list
357
+ moments_1 = np.zeros(n_nodes, order="C")
358
+ moments_1[:n_samples] = 1
359
+ moments_2 = np.zeros((n_nodes, n_features), order="C")
360
+ moments_2[:n_samples] = X
361
+ inertia = np.empty(len(coord_row), dtype=np.float64, order="C")
362
+ _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, inertia)
363
+ inertia = list(zip(inertia, coord_row, coord_col))
364
+ heapify(inertia)
365
+
366
+ # prepare the main fields
367
+ parent = np.arange(n_nodes, dtype=np.intp)
368
+ used_node = np.ones(n_nodes, dtype=bool)
369
+ children = []
370
+ if return_distance:
371
+ distances = np.empty(n_nodes - n_samples)
372
+
373
+ not_visited = np.empty(n_nodes, dtype=bool, order="C")
374
+
375
+ # recursive merge loop
376
+ for k in range(n_samples, n_nodes):
377
+ # identify the merge
378
+ while True:
379
+ inert, i, j = heappop(inertia)
380
+ if used_node[i] and used_node[j]:
381
+ break
382
+ parent[i], parent[j] = k, k
383
+ children.append((i, j))
384
+ used_node[i] = used_node[j] = False
385
+ if return_distance: # store inertia value
386
+ distances[k - n_samples] = inert
387
+
388
+ # update the moments
389
+ moments_1[k] = moments_1[i] + moments_1[j]
390
+ moments_2[k] = moments_2[i] + moments_2[j]
391
+
392
+ # update the structure matrix A and the inertia matrix
393
+ coord_col = []
394
+ not_visited.fill(1)
395
+ not_visited[k] = 0
396
+ _hierarchical._get_parents(A[i], coord_col, parent, not_visited)
397
+ _hierarchical._get_parents(A[j], coord_col, parent, not_visited)
398
+ # List comprehension is faster than a for loop
399
+ [A[col].append(k) for col in coord_col]
400
+ A.append(coord_col)
401
+ coord_col = np.array(coord_col, dtype=np.intp, order="C")
402
+ coord_row = np.empty(coord_col.shape, dtype=np.intp, order="C")
403
+ coord_row.fill(k)
404
+ n_additions = len(coord_row)
405
+ ini = np.empty(n_additions, dtype=np.float64, order="C")
406
+
407
+ _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, ini)
408
+
409
+ # List comprehension is faster than a for loop
410
+ [heappush(inertia, (ini[idx], k, coord_col[idx])) for idx in range(n_additions)]
411
+
412
+ # Separate leaves in children (empty lists up to now)
413
+ n_leaves = n_samples
414
+ # sort children to get consistent output with unstructured version
415
+ children = [c[::-1] for c in children]
416
+ children = np.array(children) # return numpy array for efficient caching
417
+
418
+ if return_distance:
419
+ # 2 is scaling factor to compare w/ unstructured version
420
+ distances = np.sqrt(2.0 * distances)
421
+ return children, n_connected_components, n_leaves, parent, distances
422
+ else:
423
+ return children, n_connected_components, n_leaves, parent
424
+
425
+
426
+ # single average and complete linkage
427
+ def linkage_tree(
428
+ X,
429
+ connectivity=None,
430
+ n_clusters=None,
431
+ linkage="complete",
432
+ affinity="euclidean",
433
+ return_distance=False,
434
+ ):
435
+ """Linkage agglomerative clustering based on a Feature matrix.
436
+
437
+ The inertia matrix uses a Heapq-based representation.
438
+
439
+ This is the structured version, that takes into account some topological
440
+ structure between samples.
441
+
442
+ Read more in the :ref:`User Guide <hierarchical_clustering>`.
443
+
444
+ Parameters
445
+ ----------
446
+ X : array-like of shape (n_samples, n_features)
447
+ Feature matrix representing `n_samples` samples to be clustered.
448
+
449
+ connectivity : sparse matrix, default=None
450
+ Connectivity matrix. Defines for each sample the neighboring samples
451
+ following a given structure of the data. The matrix is assumed to
452
+ be symmetric and only the upper triangular half is used.
453
+ Default is `None`, i.e, the Ward algorithm is unstructured.
454
+
455
+ n_clusters : int, default=None
456
+ Stop early the construction of the tree at `n_clusters`. This is
457
+ useful to decrease computation time if the number of clusters is
458
+ not small compared to the number of samples. In this case, the
459
+ complete tree is not computed, thus the 'children' output is of
460
+ limited use, and the 'parents' output should rather be used.
461
+ This option is valid only when specifying a connectivity matrix.
462
+
463
+ linkage : {"average", "complete", "single"}, default="complete"
464
+ Which linkage criteria to use. The linkage criterion determines which
465
+ distance to use between sets of observation.
466
+ - "average" uses the average of the distances of each observation of
467
+ the two sets.
468
+ - "complete" or maximum linkage uses the maximum distances between
469
+ all observations of the two sets.
470
+ - "single" uses the minimum of the distances between all
471
+ observations of the two sets.
472
+
473
+ affinity : str or callable, default='euclidean'
474
+ Which metric to use. Can be 'euclidean', 'manhattan', or any
475
+ distance known to paired distance (see metric.pairwise).
476
+
477
+ return_distance : bool, default=False
478
+ Whether or not to return the distances between the clusters.
479
+
480
+ Returns
481
+ -------
482
+ children : ndarray of shape (n_nodes-1, 2)
483
+ The children of each non-leaf node. Values less than `n_samples`
484
+ correspond to leaves of the tree which are the original samples.
485
+ A node `i` greater than or equal to `n_samples` is a non-leaf
486
+ node and has children `children_[i - n_samples]`. Alternatively
487
+ at the i-th iteration, children[i][0] and children[i][1]
488
+ are merged to form node `n_samples + i`.
489
+
490
+ n_connected_components : int
491
+ The number of connected components in the graph.
492
+
493
+ n_leaves : int
494
+ The number of leaves in the tree.
495
+
496
+ parents : ndarray of shape (n_nodes, ) or None
497
+ The parent of each node. Only returned when a connectivity matrix
498
+ is specified, elsewhere 'None' is returned.
499
+
500
+ distances : ndarray of shape (n_nodes-1,)
501
+ Returned when `return_distance` is set to `True`.
502
+
503
+ distances[i] refers to the distance between children[i][0] and
504
+ children[i][1] when they are merged.
505
+
506
+ See Also
507
+ --------
508
+ ward_tree : Hierarchical clustering with ward linkage.
509
+ """
510
+ X = np.asarray(X)
511
+ if X.ndim == 1:
512
+ X = np.reshape(X, (-1, 1))
513
+ n_samples, n_features = X.shape
514
+
515
+ linkage_choices = {
516
+ "complete": _hierarchical.max_merge,
517
+ "average": _hierarchical.average_merge,
518
+ "single": None,
519
+ } # Single linkage is handled differently
520
+ try:
521
+ join_func = linkage_choices[linkage]
522
+ except KeyError as e:
523
+ raise ValueError(
524
+ "Unknown linkage option, linkage should be one of %s, but %s was given"
525
+ % (linkage_choices.keys(), linkage)
526
+ ) from e
527
+
528
+ if affinity == "cosine" and np.any(~np.any(X, axis=1)):
529
+ raise ValueError("Cosine affinity cannot be used when X contains zero vectors")
530
+
531
+ if connectivity is None:
532
+ from scipy.cluster import hierarchy # imports PIL
533
+
534
+ if n_clusters is not None:
535
+ warnings.warn(
536
+ (
537
+ "Partial build of the tree is implemented "
538
+ "only for structured clustering (i.e. with "
539
+ "explicit connectivity). The algorithm "
540
+ "will build the full tree and only "
541
+ "retain the lower branches required "
542
+ "for the specified number of clusters"
543
+ ),
544
+ stacklevel=2,
545
+ )
546
+
547
+ if affinity == "precomputed":
548
+ # for the linkage function of hierarchy to work on precomputed
549
+ # data, provide as first argument an ndarray of the shape returned
550
+ # by sklearn.metrics.pairwise_distances.
551
+ if X.shape[0] != X.shape[1]:
552
+ raise ValueError(
553
+ f"Distance matrix should be square, got matrix of shape {X.shape}"
554
+ )
555
+ i, j = np.triu_indices(X.shape[0], k=1)
556
+ X = X[i, j]
557
+ elif affinity == "l2":
558
+ # Translate to something understood by scipy
559
+ affinity = "euclidean"
560
+ elif affinity in ("l1", "manhattan"):
561
+ affinity = "cityblock"
562
+ elif callable(affinity):
563
+ X = affinity(X)
564
+ i, j = np.triu_indices(X.shape[0], k=1)
565
+ X = X[i, j]
566
+ if (
567
+ linkage == "single"
568
+ and affinity != "precomputed"
569
+ and not callable(affinity)
570
+ and affinity in METRIC_MAPPING64
571
+ ):
572
+ # We need the fast cythonized metric from neighbors
573
+ dist_metric = DistanceMetric.get_metric(affinity)
574
+
575
+ # The Cython routines used require contiguous arrays
576
+ X = np.ascontiguousarray(X, dtype=np.double)
577
+
578
+ mst = _hierarchical.mst_linkage_core(X, dist_metric)
579
+ # Sort edges of the min_spanning_tree by weight
580
+ mst = mst[np.argsort(mst.T[2], kind="mergesort"), :]
581
+
582
+ # Convert edge list into standard hierarchical clustering format
583
+ out = _hierarchical.single_linkage_label(mst)
584
+ else:
585
+ out = hierarchy.linkage(X, method=linkage, metric=affinity)
586
+ children_ = out[:, :2].astype(int, copy=False)
587
+
588
+ if return_distance:
589
+ distances = out[:, 2]
590
+ return children_, 1, n_samples, None, distances
591
+ return children_, 1, n_samples, None
592
+
593
+ connectivity, n_connected_components = _fix_connectivity(
594
+ X, connectivity, affinity=affinity
595
+ )
596
+ connectivity = connectivity.tocoo()
597
+ # Put the diagonal to zero
598
+ diag_mask = connectivity.row != connectivity.col
599
+ connectivity.row = connectivity.row[diag_mask]
600
+ connectivity.col = connectivity.col[diag_mask]
601
+ connectivity.data = connectivity.data[diag_mask]
602
+ del diag_mask
603
+
604
+ if affinity == "precomputed":
605
+ distances = X[connectivity.row, connectivity.col].astype(np.float64, copy=False)
606
+ else:
607
+ # FIXME We compute all the distances, while we could have only computed
608
+ # the "interesting" distances
609
+ distances = paired_distances(
610
+ X[connectivity.row], X[connectivity.col], metric=affinity
611
+ )
612
+ connectivity.data = distances
613
+
614
+ if n_clusters is None:
615
+ n_nodes = 2 * n_samples - 1
616
+ else:
617
+ assert n_clusters <= n_samples
618
+ n_nodes = 2 * n_samples - n_clusters
619
+
620
+ if linkage == "single":
621
+ return _single_linkage_tree(
622
+ connectivity,
623
+ n_samples,
624
+ n_nodes,
625
+ n_clusters,
626
+ n_connected_components,
627
+ return_distance,
628
+ )
629
+
630
+ if return_distance:
631
+ distances = np.empty(n_nodes - n_samples)
632
+ # create inertia heap and connection matrix
633
+ A = np.empty(n_nodes, dtype=object)
634
+ inertia = list()
635
+
636
+ # LIL seems to the best format to access the rows quickly,
637
+ # without the numpy overhead of slicing CSR indices and data.
638
+ connectivity = connectivity.tolil()
639
+ # We are storing the graph in a list of IntFloatDict
640
+ for ind, (data, row) in enumerate(zip(connectivity.data, connectivity.rows)):
641
+ A[ind] = IntFloatDict(
642
+ np.asarray(row, dtype=np.intp), np.asarray(data, dtype=np.float64)
643
+ )
644
+ # We keep only the upper triangular for the heap
645
+ # Generator expressions are faster than arrays on the following
646
+ inertia.extend(
647
+ _hierarchical.WeightedEdge(d, ind, r) for r, d in zip(row, data) if r < ind
648
+ )
649
+ del connectivity
650
+
651
+ heapify(inertia)
652
+
653
+ # prepare the main fields
654
+ parent = np.arange(n_nodes, dtype=np.intp)
655
+ used_node = np.ones(n_nodes, dtype=np.intp)
656
+ children = []
657
+
658
+ # recursive merge loop
659
+ for k in range(n_samples, n_nodes):
660
+ # identify the merge
661
+ while True:
662
+ edge = heappop(inertia)
663
+ if used_node[edge.a] and used_node[edge.b]:
664
+ break
665
+ i = edge.a
666
+ j = edge.b
667
+
668
+ if return_distance:
669
+ # store distances
670
+ distances[k - n_samples] = edge.weight
671
+
672
+ parent[i] = parent[j] = k
673
+ children.append((i, j))
674
+ # Keep track of the number of elements per cluster
675
+ n_i = used_node[i]
676
+ n_j = used_node[j]
677
+ used_node[k] = n_i + n_j
678
+ used_node[i] = used_node[j] = False
679
+
680
+ # update the structure matrix A and the inertia matrix
681
+ # a clever 'min', or 'max' operation between A[i] and A[j]
682
+ coord_col = join_func(A[i], A[j], used_node, n_i, n_j)
683
+ for col, d in coord_col:
684
+ A[col].append(k, d)
685
+ # Here we use the information from coord_col (containing the
686
+ # distances) to update the heap
687
+ heappush(inertia, _hierarchical.WeightedEdge(d, k, col))
688
+ A[k] = coord_col
689
+ # Clear A[i] and A[j] to save memory
690
+ A[i] = A[j] = 0
691
+
692
+ # Separate leaves in children (empty lists up to now)
693
+ n_leaves = n_samples
694
+
695
+ # return numpy array for efficient caching
696
+ children = np.array(children)[:, ::-1]
697
+
698
+ if return_distance:
699
+ return children, n_connected_components, n_leaves, parent, distances
700
+ return children, n_connected_components, n_leaves, parent
701
+
702
+
703
+ # Matching names to tree-building strategies
704
+ def _complete_linkage(*args, **kwargs):
705
+ kwargs["linkage"] = "complete"
706
+ return linkage_tree(*args, **kwargs)
707
+
708
+
709
+ def _average_linkage(*args, **kwargs):
710
+ kwargs["linkage"] = "average"
711
+ return linkage_tree(*args, **kwargs)
712
+
713
+
714
+ def _single_linkage(*args, **kwargs):
715
+ kwargs["linkage"] = "single"
716
+ return linkage_tree(*args, **kwargs)
717
+
718
+
719
+ _TREE_BUILDERS = dict(
720
+ ward=ward_tree,
721
+ complete=_complete_linkage,
722
+ average=_average_linkage,
723
+ single=_single_linkage,
724
+ )
725
+
726
+ ###############################################################################
727
+ # Functions for cutting hierarchical clustering tree
728
+
729
+
730
+ def _hc_cut(n_clusters, children, n_leaves):
731
+ """Function cutting the ward tree for a given number of clusters.
732
+
733
+ Parameters
734
+ ----------
735
+ n_clusters : int or ndarray
736
+ The number of clusters to form.
737
+
738
+ children : ndarray of shape (n_nodes-1, 2)
739
+ The children of each non-leaf node. Values less than `n_samples`
740
+ correspond to leaves of the tree which are the original samples.
741
+ A node `i` greater than or equal to `n_samples` is a non-leaf
742
+ node and has children `children_[i - n_samples]`. Alternatively
743
+ at the i-th iteration, children[i][0] and children[i][1]
744
+ are merged to form node `n_samples + i`.
745
+
746
+ n_leaves : int
747
+ Number of leaves of the tree.
748
+
749
+ Returns
750
+ -------
751
+ labels : array [n_samples]
752
+ Cluster labels for each point.
753
+ """
754
+ if n_clusters > n_leaves:
755
+ raise ValueError(
756
+ "Cannot extract more clusters than samples: "
757
+ "%s clusters were given for a tree with %s leaves."
758
+ % (n_clusters, n_leaves)
759
+ )
760
+ # In this function, we store nodes as a heap to avoid recomputing
761
+ # the max of the nodes: the first element is always the smallest
762
+ # We use negated indices as heaps work on smallest elements, and we
763
+ # are interested in largest elements
764
+ # children[-1] is the root of the tree
765
+ nodes = [-(max(children[-1]) + 1)]
766
+ for _ in range(n_clusters - 1):
767
+ # As we have a heap, nodes[0] is the smallest element
768
+ these_children = children[-nodes[0] - n_leaves]
769
+ # Insert the 2 children and remove the largest node
770
+ heappush(nodes, -these_children[0])
771
+ heappushpop(nodes, -these_children[1])
772
+ label = np.zeros(n_leaves, dtype=np.intp)
773
+ for i, node in enumerate(nodes):
774
+ label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i
775
+ return label
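As an aside on the `children` encoding documented above (values below `n_leaves` are original samples, and a node `i >= n_leaves` stores its children at `children[i - n_leaves]`), here is a minimal self-contained sketch of walking that structure; the helper name is hypothetical and the snippet is not part of the module:

import numpy as np

def leaves_of(node, children, n_leaves):
    # Expand a node index into the original sample indices it covers,
    # following the encoding described in the docstring above.
    if node < n_leaves:
        return [node]
    left, right = children[node - n_leaves]
    return leaves_of(left, children, n_leaves) + leaves_of(right, children, n_leaves)

# Toy merge history for 4 samples: (0, 1) -> node 4, (2, 3) -> node 5, (4, 5) -> node 6.
children = np.array([[0, 1], [2, 3], [4, 5]])
print(leaves_of(6, children, n_leaves=4))  # [0, 1, 2, 3]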
776
+
777
+
778
+ ###############################################################################
779
+
780
+
781
+ class AgglomerativeClustering(ClusterMixin, BaseEstimator):
782
+ """
783
+ Agglomerative Clustering.
784
+
785
+ Recursively merges pair of clusters of sample data; uses linkage distance.
786
+
787
+ Read more in the :ref:`User Guide <hierarchical_clustering>`.
788
+
789
+ Parameters
790
+ ----------
791
+ n_clusters : int or None, default=2
792
+ The number of clusters to find. It must be ``None`` if
793
+ ``distance_threshold`` is not ``None``.
794
+
795
+ metric : str or callable, default="euclidean"
796
+ Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
797
+ "manhattan", "cosine", or "precomputed". If linkage is "ward", only
798
+ "euclidean" is accepted. If "precomputed", a distance matrix is needed
799
+ as input for the fit method.
800
+
801
+ .. versionadded:: 1.2
802
+
803
+ .. deprecated:: 1.4
804
+ `metric=None` is deprecated in 1.4 and will be removed in 1.6.
805
+ Let `metric` be the default value (i.e. `"euclidean"`) instead.
806
+
807
+ memory : str or object with the joblib.Memory interface, default=None
808
+ Used to cache the output of the computation of the tree.
809
+ By default, no caching is done. If a string is given, it is the
810
+ path to the caching directory.
811
+
812
+ connectivity : array-like or callable, default=None
813
+ Connectivity matrix. Defines for each sample the neighboring
814
+ samples following a given structure of the data.
815
+ This can be a connectivity matrix itself or a callable that transforms
816
+ the data into a connectivity matrix, such as derived from
817
+ `kneighbors_graph`. Default is ``None``, i.e., the
818
+ hierarchical clustering algorithm is unstructured.
819
+
820
+ compute_full_tree : 'auto' or bool, default='auto'
821
+ Stop early the construction of the tree at ``n_clusters``. This is
822
+ useful to decrease computation time if the number of clusters is not
823
+ small compared to the number of samples. This option is useful only
824
+ when specifying a connectivity matrix. Note also that when varying the
825
+ number of clusters and using caching, it may be advantageous to compute
826
+ the full tree. It must be ``True`` if ``distance_threshold`` is not
827
+ ``None``. By default `compute_full_tree` is "auto", which is equivalent
828
+ to `True` when `distance_threshold` is not `None` or when `n_clusters`
829
+ is less than the maximum of 100 and `0.02 * n_samples`.
830
+ Otherwise, "auto" is equivalent to `False`.
831
+
832
+ linkage : {'ward', 'complete', 'average', 'single'}, default='ward'
833
+ Which linkage criterion to use. The linkage criterion determines which
834
+ distance to use between sets of observation. The algorithm will merge
835
+ the pairs of cluster that minimize this criterion.
836
+
837
+ - 'ward' minimizes the variance of the clusters being merged.
838
+ - 'average' uses the average of the distances of each observation of
839
+ the two sets.
840
+ - 'complete' or 'maximum' linkage uses the maximum distances between
841
+ all observations of the two sets.
842
+ - 'single' uses the minimum of the distances between all observations
843
+ of the two sets.
844
+
845
+ .. versionadded:: 0.20
846
+ Added the 'single' option
847
+
848
+ distance_threshold : float, default=None
849
+ The linkage distance threshold at or above which clusters will not be
850
+ merged. If not ``None``, ``n_clusters`` must be ``None`` and
851
+ ``compute_full_tree`` must be ``True``.
852
+
853
+ .. versionadded:: 0.21
854
+
855
+ compute_distances : bool, default=False
856
+ Computes distances between clusters even if `distance_threshold` is not
857
+ used. This can be used to make dendrogram visualization, but introduces
858
+ a computational and memory overhead.
859
+
860
+ .. versionadded:: 0.24
861
+
862
+ Attributes
863
+ ----------
864
+ n_clusters_ : int
865
+ The number of clusters found by the algorithm. If
866
+ ``distance_threshold=None``, it will be equal to the given
867
+ ``n_clusters``.
868
+
869
+ labels_ : ndarray of shape (n_samples,)
870
+ Cluster labels for each point.
871
+
872
+ n_leaves_ : int
873
+ Number of leaves in the hierarchical tree.
874
+
875
+ n_connected_components_ : int
876
+ The estimated number of connected components in the graph.
877
+
878
+ .. versionadded:: 0.21
879
+ ``n_connected_components_`` was added to replace ``n_components_``.
880
+
881
+ n_features_in_ : int
882
+ Number of features seen during :term:`fit`.
883
+
884
+ .. versionadded:: 0.24
885
+
886
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
887
+ Names of features seen during :term:`fit`. Defined only when `X`
888
+ has feature names that are all strings.
889
+
890
+ .. versionadded:: 1.0
891
+
892
+ children_ : array-like of shape (n_samples-1, 2)
893
+ The children of each non-leaf node. Values less than `n_samples`
894
+ correspond to leaves of the tree which are the original samples.
895
+ A node `i` greater than or equal to `n_samples` is a non-leaf
896
+ node and has children `children_[i - n_samples]`. Alternatively
897
+ at the i-th iteration, children[i][0] and children[i][1]
898
+ are merged to form node `n_samples + i`.
899
+
900
+ distances_ : array-like of shape (n_nodes-1,)
901
+ Distances between nodes in the corresponding place in `children_`.
902
+ Only computed if `distance_threshold` is used or `compute_distances`
903
+ is set to `True`.
904
+
905
+ See Also
906
+ --------
907
+ FeatureAgglomeration : Agglomerative clustering but for features instead of
908
+ samples.
909
+ ward_tree : Hierarchical clustering with ward linkage.
910
+
911
+ Examples
912
+ --------
913
+ >>> from sklearn.cluster import AgglomerativeClustering
914
+ >>> import numpy as np
915
+ >>> X = np.array([[1, 2], [1, 4], [1, 0],
916
+ ... [4, 2], [4, 4], [4, 0]])
917
+ >>> clustering = AgglomerativeClustering().fit(X)
918
+ >>> clustering
919
+ AgglomerativeClustering()
920
+ >>> clustering.labels_
921
+ array([1, 1, 1, 0, 0, 0])
922
+ """
923
+
924
+ _parameter_constraints: dict = {
925
+ "n_clusters": [Interval(Integral, 1, None, closed="left"), None],
926
+ "metric": [
927
+ StrOptions(set(_VALID_METRICS) | {"precomputed"}),
928
+ callable,
929
+ Hidden(None),
930
+ ],
931
+ "memory": [str, HasMethods("cache"), None],
932
+ "connectivity": ["array-like", callable, None],
933
+ "compute_full_tree": [StrOptions({"auto"}), "boolean"],
934
+ "linkage": [StrOptions(set(_TREE_BUILDERS.keys()))],
935
+ "distance_threshold": [Interval(Real, 0, None, closed="left"), None],
936
+ "compute_distances": ["boolean"],
937
+ }
938
+
939
+ def __init__(
940
+ self,
941
+ n_clusters=2,
942
+ *,
943
+ metric="euclidean",
944
+ memory=None,
945
+ connectivity=None,
946
+ compute_full_tree="auto",
947
+ linkage="ward",
948
+ distance_threshold=None,
949
+ compute_distances=False,
950
+ ):
951
+ self.n_clusters = n_clusters
952
+ self.distance_threshold = distance_threshold
953
+ self.memory = memory
954
+ self.connectivity = connectivity
955
+ self.compute_full_tree = compute_full_tree
956
+ self.linkage = linkage
957
+ self.metric = metric
958
+ self.compute_distances = compute_distances
959
+
960
+ @_fit_context(prefer_skip_nested_validation=True)
961
+ def fit(self, X, y=None):
962
+ """Fit the hierarchical clustering from features, or distance matrix.
963
+
964
+ Parameters
965
+ ----------
966
+ X : array-like, shape (n_samples, n_features) or \
967
+ (n_samples, n_samples)
968
+ Training instances to cluster, or distances between instances if
969
+ ``metric='precomputed'``.
970
+
971
+ y : Ignored
972
+ Not used, present here for API consistency by convention.
973
+
974
+ Returns
975
+ -------
976
+ self : object
977
+ Returns the fitted instance.
978
+ """
979
+ X = self._validate_data(X, ensure_min_samples=2)
980
+ return self._fit(X)
981
+
982
+ def _fit(self, X):
983
+ """Fit without validation
984
+
985
+ Parameters
986
+ ----------
987
+ X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)
988
+ Training instances to cluster, or distances between instances if
989
+ ``affinity='precomputed'``.
990
+
991
+ Returns
992
+ -------
993
+ self : object
994
+ Returns the fitted instance.
995
+ """
996
+ memory = check_memory(self.memory)
997
+
998
+ # TODO(1.6): remove in 1.6
999
+ if self.metric is None:
1000
+ warnings.warn(
1001
+ (
1002
+ "`metric=None` is deprecated in version 1.4 and will be removed in "
1003
+ "version 1.6. Let `metric` be the default value "
1004
+ "(i.e. `'euclidean'`) instead."
1005
+ ),
1006
+ FutureWarning,
1007
+ )
1008
+ self._metric = "euclidean"
1009
+ else:
1010
+ self._metric = self.metric
1011
+
1012
+ if not ((self.n_clusters is None) ^ (self.distance_threshold is None)):
1013
+ raise ValueError(
1014
+ "Exactly one of n_clusters and "
1015
+ "distance_threshold has to be set, and the other "
1016
+ "needs to be None."
1017
+ )
1018
+
1019
+ if self.distance_threshold is not None and not self.compute_full_tree:
1020
+ raise ValueError(
1021
+ "compute_full_tree must be True if distance_threshold is set."
1022
+ )
1023
+
1024
+ if self.linkage == "ward" and self._metric != "euclidean":
1025
+ raise ValueError(
1026
+ f"{self._metric} was provided as metric. Ward can only "
1027
+ "work with euclidean distances."
1028
+ )
1029
+
1030
+ tree_builder = _TREE_BUILDERS[self.linkage]
1031
+
1032
+ connectivity = self.connectivity
1033
+ if self.connectivity is not None:
1034
+ if callable(self.connectivity):
1035
+ connectivity = self.connectivity(X)
1036
+ connectivity = check_array(
1037
+ connectivity, accept_sparse=["csr", "coo", "lil"]
1038
+ )
1039
+
1040
+ n_samples = len(X)
1041
+ compute_full_tree = self.compute_full_tree
1042
+ if self.connectivity is None:
1043
+ compute_full_tree = True
1044
+ if compute_full_tree == "auto":
1045
+ if self.distance_threshold is not None:
1046
+ compute_full_tree = True
1047
+ else:
1048
+ # Early stopping is likely to give a speed up only for
1049
+ # a large number of clusters. The actual threshold
1050
+ # implemented here is heuristic
1051
+ compute_full_tree = self.n_clusters < max(100, 0.02 * n_samples)
1052
+ n_clusters = self.n_clusters
1053
+ if compute_full_tree:
1054
+ n_clusters = None
1055
+
1056
+ # Construct the tree
1057
+ kwargs = {}
1058
+ if self.linkage != "ward":
1059
+ kwargs["linkage"] = self.linkage
1060
+ kwargs["affinity"] = self._metric
1061
+
1062
+ distance_threshold = self.distance_threshold
1063
+
1064
+ return_distance = (distance_threshold is not None) or self.compute_distances
1065
+
1066
+ out = memory.cache(tree_builder)(
1067
+ X,
1068
+ connectivity=connectivity,
1069
+ n_clusters=n_clusters,
1070
+ return_distance=return_distance,
1071
+ **kwargs,
1072
+ )
1073
+ (self.children_, self.n_connected_components_, self.n_leaves_, parents) = out[
1074
+ :4
1075
+ ]
1076
+
1077
+ if return_distance:
1078
+ self.distances_ = out[-1]
1079
+
1080
+ if self.distance_threshold is not None: # distance_threshold is used
1081
+ self.n_clusters_ = (
1082
+ np.count_nonzero(self.distances_ >= distance_threshold) + 1
1083
+ )
1084
+ else: # n_clusters is used
1085
+ self.n_clusters_ = self.n_clusters
1086
+
1087
+ # Cut the tree
1088
+ if compute_full_tree:
1089
+ self.labels_ = _hc_cut(self.n_clusters_, self.children_, self.n_leaves_)
1090
+ else:
1091
+ labels = _hierarchical.hc_get_heads(parents, copy=False)
1092
+ # copy to avoid holding a reference on the original array
1093
+ labels = np.copy(labels[:n_samples])
1094
+ # Reassign cluster numbers
1095
+ self.labels_ = np.searchsorted(np.unique(labels), labels)
1096
+ return self
1097
+
1098
+ def fit_predict(self, X, y=None):
1099
+ """Fit and return the result of each sample's clustering assignment.
1100
+
1101
+ In addition to fitting, this method also returns the result of the
1102
+ clustering assignment for each sample in the training set.
1103
+
1104
+ Parameters
1105
+ ----------
1106
+ X : array-like of shape (n_samples, n_features) or \
1107
+ (n_samples, n_samples)
1108
+ Training instances to cluster, or distances between instances if
1109
+ ``affinity='precomputed'``.
1110
+
1111
+ y : Ignored
1112
+ Not used, present here for API consistency by convention.
1113
+
1114
+ Returns
1115
+ -------
1116
+ labels : ndarray of shape (n_samples,)
1117
+ Cluster labels.
1118
+ """
1119
+ return super().fit_predict(X, y)
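For reference, a minimal usage sketch of the `distance_threshold` mode described in the parameters above, reusing the toy data from the class docstring (the threshold value is chosen arbitrarily); this is illustrative only, not part of the file:

import numpy as np
from sklearn.cluster import AgglomerativeClustering

X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]])
# n_clusters must be None when distance_threshold is set; distances_ is then
# populated and n_clusters_ is derived from the threshold.
model = AgglomerativeClustering(n_clusters=None, distance_threshold=3.0).fit(X)
print(model.n_clusters_)
print(model.labels_)
print(model.distances_)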
1120
+
1121
+
1122
+ class FeatureAgglomeration(
1123
+ ClassNamePrefixFeaturesOutMixin, AgglomerativeClustering, AgglomerationTransform
1124
+ ):
1125
+ """Agglomerate features.
1126
+
1127
+ Recursively merges pair of clusters of features.
1128
+
1129
+ Read more in the :ref:`User Guide <hierarchical_clustering>`.
1130
+
1131
+ Parameters
1132
+ ----------
1133
+ n_clusters : int or None, default=2
1134
+ The number of clusters to find. It must be ``None`` if
1135
+ ``distance_threshold`` is not ``None``.
1136
+
1137
+ metric : str or callable, default="euclidean"
1138
+ Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
1139
+ "manhattan", "cosine", or "precomputed". If linkage is "ward", only
1140
+ "euclidean" is accepted. If "precomputed", a distance matrix is needed
1141
+ as input for the fit method.
1142
+
1143
+ .. versionadded:: 1.2
1144
+
1145
+ .. deprecated:: 1.4
1146
+ `metric=None` is deprecated in 1.4 and will be removed in 1.6.
1147
+ Let `metric` be the default value (i.e. `"euclidean"`) instead.
1148
+
1149
+ memory : str or object with the joblib.Memory interface, default=None
1150
+ Used to cache the output of the computation of the tree.
1151
+ By default, no caching is done. If a string is given, it is the
1152
+ path to the caching directory.
1153
+
1154
+ connectivity : array-like or callable, default=None
1155
+ Connectivity matrix. Defines for each feature the neighboring
1156
+ features following a given structure of the data.
1157
+ This can be a connectivity matrix itself or a callable that transforms
1158
+ the data into a connectivity matrix, such as derived from
1159
+ `kneighbors_graph`. Default is `None`, i.e., the
1160
+ hierarchical clustering algorithm is unstructured.
1161
+
1162
+ compute_full_tree : 'auto' or bool, default='auto'
1163
+ Stop early the construction of the tree at `n_clusters`. This is useful
1164
+ to decrease computation time if the number of clusters is not small
1165
+ compared to the number of features. This option is useful only when
1166
+ specifying a connectivity matrix. Note also that when varying the
1167
+ number of clusters and using caching, it may be advantageous to compute
1168
+ the full tree. It must be ``True`` if ``distance_threshold`` is not
1169
+ ``None``. By default `compute_full_tree` is "auto", which is equivalent
1170
+ to `True` when `distance_threshold` is not `None` or when `n_clusters`
1171
+ is less than the maximum of 100 and `0.02 * n_samples`.
1172
+ Otherwise, "auto" is equivalent to `False`.
1173
+
1174
+ linkage : {"ward", "complete", "average", "single"}, default="ward"
1175
+ Which linkage criterion to use. The linkage criterion determines which
1176
+ distance to use between sets of features. The algorithm will merge
1177
+ the pairs of cluster that minimize this criterion.
1178
+
1179
+ - "ward" minimizes the variance of the clusters being merged.
1180
+ - "complete" or maximum linkage uses the maximum distances between
1181
+ all features of the two sets.
1182
+ - "average" uses the average of the distances of each feature of
1183
+ the two sets.
1184
+ - "single" uses the minimum of the distances between all features
1185
+ of the two sets.
1186
+
1187
+ pooling_func : callable, default=np.mean
1188
+ This combines the values of agglomerated features into a single
1189
+ value, and should accept an array of shape [M, N] and the keyword
1190
+ argument `axis=1`, and reduce it to an array of size [M].
1191
+
1192
+ distance_threshold : float, default=None
1193
+ The linkage distance threshold at or above which clusters will not be
1194
+ merged. If not ``None``, ``n_clusters`` must be ``None`` and
1195
+ ``compute_full_tree`` must be ``True``.
1196
+
1197
+ .. versionadded:: 0.21
1198
+
1199
+ compute_distances : bool, default=False
1200
+ Computes distances between clusters even if `distance_threshold` is not
1201
+ used. This can be used to make dendrogram visualization, but introduces
1202
+ a computational and memory overhead.
1203
+
1204
+ .. versionadded:: 0.24
1205
+
1206
+ Attributes
1207
+ ----------
1208
+ n_clusters_ : int
1209
+ The number of clusters found by the algorithm. If
1210
+ ``distance_threshold=None``, it will be equal to the given
1211
+ ``n_clusters``.
1212
+
1213
+ labels_ : array-like of shape (n_features,)
1214
+ Cluster labels for each feature.
1215
+
1216
+ n_leaves_ : int
1217
+ Number of leaves in the hierarchical tree.
1218
+
1219
+ n_connected_components_ : int
1220
+ The estimated number of connected components in the graph.
1221
+
1222
+ .. versionadded:: 0.21
1223
+ ``n_connected_components_`` was added to replace ``n_components_``.
1224
+
1225
+ n_features_in_ : int
1226
+ Number of features seen during :term:`fit`.
1227
+
1228
+ .. versionadded:: 0.24
1229
+
1230
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1231
+ Names of features seen during :term:`fit`. Defined only when `X`
1232
+ has feature names that are all strings.
1233
+
1234
+ .. versionadded:: 1.0
1235
+
1236
+ children_ : array-like of shape (n_nodes-1, 2)
1237
+ The children of each non-leaf node. Values less than `n_features`
1238
+ correspond to leaves of the tree which are the original samples.
1239
+ A node `i` greater than or equal to `n_features` is a non-leaf
1240
+ node and has children `children_[i - n_features]`. Alternatively
1241
+ at the i-th iteration, children[i][0] and children[i][1]
1242
+ are merged to form node `n_features + i`.
1243
+
1244
+ distances_ : array-like of shape (n_nodes-1,)
1245
+ Distances between nodes in the corresponding place in `children_`.
1246
+ Only computed if `distance_threshold` is used or `compute_distances`
1247
+ is set to `True`.
1248
+
1249
+ See Also
1250
+ --------
1251
+ AgglomerativeClustering : Agglomerative clustering samples instead of
1252
+ features.
1253
+ ward_tree : Hierarchical clustering with ward linkage.
1254
+
1255
+ Examples
1256
+ --------
1257
+ >>> import numpy as np
1258
+ >>> from sklearn import datasets, cluster
1259
+ >>> digits = datasets.load_digits()
1260
+ >>> images = digits.images
1261
+ >>> X = np.reshape(images, (len(images), -1))
1262
+ >>> agglo = cluster.FeatureAgglomeration(n_clusters=32)
1263
+ >>> agglo.fit(X)
1264
+ FeatureAgglomeration(n_clusters=32)
1265
+ >>> X_reduced = agglo.transform(X)
1266
+ >>> X_reduced.shape
1267
+ (1797, 32)
1268
+ """
1269
+
1270
+ _parameter_constraints: dict = {
1271
+ "n_clusters": [Interval(Integral, 1, None, closed="left"), None],
1272
+ "metric": [
1273
+ StrOptions(set(_VALID_METRICS) | {"precomputed"}),
1274
+ callable,
1275
+ Hidden(None),
1276
+ ],
1277
+ "memory": [str, HasMethods("cache"), None],
1278
+ "connectivity": ["array-like", callable, None],
1279
+ "compute_full_tree": [StrOptions({"auto"}), "boolean"],
1280
+ "linkage": [StrOptions(set(_TREE_BUILDERS.keys()))],
1281
+ "pooling_func": [callable],
1282
+ "distance_threshold": [Interval(Real, 0, None, closed="left"), None],
1283
+ "compute_distances": ["boolean"],
1284
+ }
1285
+
1286
+ def __init__(
1287
+ self,
1288
+ n_clusters=2,
1289
+ *,
1290
+ metric="euclidean",
1291
+ memory=None,
1292
+ connectivity=None,
1293
+ compute_full_tree="auto",
1294
+ linkage="ward",
1295
+ pooling_func=np.mean,
1296
+ distance_threshold=None,
1297
+ compute_distances=False,
1298
+ ):
1299
+ super().__init__(
1300
+ n_clusters=n_clusters,
1301
+ memory=memory,
1302
+ connectivity=connectivity,
1303
+ compute_full_tree=compute_full_tree,
1304
+ linkage=linkage,
1305
+ metric=metric,
1306
+ distance_threshold=distance_threshold,
1307
+ compute_distances=compute_distances,
1308
+ )
1309
+ self.pooling_func = pooling_func
1310
+
1311
+ @_fit_context(prefer_skip_nested_validation=True)
1312
+ def fit(self, X, y=None):
1313
+ """Fit the hierarchical clustering on the data.
1314
+
1315
+ Parameters
1316
+ ----------
1317
+ X : array-like of shape (n_samples, n_features)
1318
+ The data.
1319
+
1320
+ y : Ignored
1321
+ Not used, present here for API consistency by convention.
1322
+
1323
+ Returns
1324
+ -------
1325
+ self : object
1326
+ Returns the transformer.
1327
+ """
1328
+ X = self._validate_data(X, ensure_min_features=2)
1329
+ super()._fit(X.T)
1330
+ self._n_features_out = self.n_clusters_
1331
+ return self
1332
+
1333
+ @property
1334
+ def fit_predict(self):
1335
+ """Fit and return the result of each sample's clustering assignment."""
1336
+ raise AttributeError
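A small usage sketch of the `pooling_func` parameter documented above, on toy data with median pooling chosen arbitrarily; illustrative only, not part of the file:

import numpy as np
from sklearn.cluster import FeatureAgglomeration

X = np.array([[0.0, 1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0, 7.0],
              [8.0, 9.0, 10.0, 11.0]])
# Merge the four features into two clusters and pool each cluster with the median.
agglo = FeatureAgglomeration(n_clusters=2, pooling_func=np.median).fit(X)
print(agglo.labels_)             # cluster label of each original feature
print(agglo.transform(X).shape)  # (3, 2)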
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_bicluster.py ADDED
@@ -0,0 +1,622 @@
1
+ """Spectral biclustering algorithms."""
2
+ # Authors : Kemal Eren
3
+ # License: BSD 3 clause
4
+
5
+ from abc import ABCMeta, abstractmethod
6
+ from numbers import Integral
7
+
8
+ import numpy as np
9
+ from scipy.linalg import norm
10
+ from scipy.sparse import dia_matrix, issparse
11
+ from scipy.sparse.linalg import eigsh, svds
12
+
13
+ from ..base import BaseEstimator, BiclusterMixin, _fit_context
14
+ from ..utils import check_random_state, check_scalar
15
+ from ..utils._param_validation import Interval, StrOptions
16
+ from ..utils.extmath import make_nonnegative, randomized_svd, safe_sparse_dot
17
+ from ..utils.validation import assert_all_finite
18
+ from ._kmeans import KMeans, MiniBatchKMeans
19
+
20
+ __all__ = ["SpectralCoclustering", "SpectralBiclustering"]
21
+
22
+
23
+ def _scale_normalize(X):
24
+ """Normalize ``X`` by scaling rows and columns independently.
25
+
26
+ Returns the normalized matrix and the row and column scaling
27
+ factors.
28
+ """
29
+ X = make_nonnegative(X)
30
+ row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
31
+ col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
32
+ row_diag = np.where(np.isnan(row_diag), 0, row_diag)
33
+ col_diag = np.where(np.isnan(col_diag), 0, col_diag)
34
+ if issparse(X):
35
+ n_rows, n_cols = X.shape
36
+ r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
37
+ c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
38
+ an = r * X * c
39
+ else:
40
+ an = row_diag[:, np.newaxis] * X * col_diag
41
+ return an, row_diag, col_diag
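As a quick numerical illustration of the scaling performed above (dense case, arbitrary toy matrix; not part of the module), each normalized entry equals `X[i, j] / sqrt(row_sum_i * col_sum_j)`:

import numpy as np

X = np.array([[1.0, 2.0], [3.0, 4.0]])
row_diag = 1.0 / np.sqrt(X.sum(axis=1))
col_diag = 1.0 / np.sqrt(X.sum(axis=0))
an = row_diag[:, np.newaxis] * X * col_diag
# Same scaling written element-wise.
print(np.allclose(an, X / np.sqrt(np.outer(X.sum(axis=1), X.sum(axis=0)))))  # True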
42
+
43
+
44
+ def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
45
+ """Normalize rows and columns of ``X`` simultaneously so that all
46
+ rows sum to one constant and all columns sum to a different
47
+ constant.
48
+ """
49
+ # According to paper, this can also be done more efficiently with
50
+ # deviation reduction and balancing algorithms.
51
+ X = make_nonnegative(X)
52
+ X_scaled = X
53
+ for _ in range(max_iter):
54
+ X_new, _, _ = _scale_normalize(X_scaled)
55
+ if issparse(X):
56
+ dist = norm(X_scaled.data - X_new.data)
57
+ else:
58
+ dist = norm(X_scaled - X_new)
59
+ X_scaled = X_new
60
+ if dist is not None and dist < tol:
61
+ break
62
+ return X_scaled
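A toy check of the property stated in the docstring above: iterating the row/column scaling drives all row sums toward one shared constant and all column sums toward another (random matrix and iteration count chosen arbitrarily; not part of the module):

import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(4, 3) + 0.1
for _ in range(100):
    r = 1.0 / np.sqrt(X.sum(axis=1))
    c = 1.0 / np.sqrt(X.sum(axis=0))
    X = r[:, np.newaxis] * X * c
print(X.sum(axis=1))  # all (approximately) equal to one constant
print(X.sum(axis=0))  # all (approximately) equal to a different constant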
63
+
64
+
65
+ def _log_normalize(X):
66
+ """Normalize ``X`` according to Kluger's log-interactions scheme."""
67
+ X = make_nonnegative(X, min_value=1)
68
+ if issparse(X):
69
+ raise ValueError(
70
+ "Cannot compute log of a sparse matrix,"
71
+ " because log(x) diverges to -infinity as x"
72
+ " goes to 0."
73
+ )
74
+ L = np.log(X)
75
+ row_avg = L.mean(axis=1)[:, np.newaxis]
76
+ col_avg = L.mean(axis=0)
77
+ avg = L.mean()
78
+ return L - row_avg - col_avg + avg
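A short check of the double-centering performed above: after subtracting the row and column means of `log(X)` and adding back the grand mean, every row and column of the result averages to zero (toy matrix; not part of the module):

import numpy as np

X = np.arange(1.0, 13.0).reshape(3, 4)
L = np.log(X)
K = L - L.mean(axis=1, keepdims=True) - L.mean(axis=0) + L.mean()
print(np.allclose(K.mean(axis=1), 0.0), np.allclose(K.mean(axis=0), 0.0))  # True True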
79
+
80
+
81
+ class BaseSpectral(BiclusterMixin, BaseEstimator, metaclass=ABCMeta):
82
+ """Base class for spectral biclustering."""
83
+
84
+ _parameter_constraints: dict = {
85
+ "svd_method": [StrOptions({"randomized", "arpack"})],
86
+ "n_svd_vecs": [Interval(Integral, 0, None, closed="left"), None],
87
+ "mini_batch": ["boolean"],
88
+ "init": [StrOptions({"k-means++", "random"}), np.ndarray],
89
+ "n_init": [Interval(Integral, 1, None, closed="left")],
90
+ "random_state": ["random_state"],
91
+ }
92
+
93
+ @abstractmethod
94
+ def __init__(
95
+ self,
96
+ n_clusters=3,
97
+ svd_method="randomized",
98
+ n_svd_vecs=None,
99
+ mini_batch=False,
100
+ init="k-means++",
101
+ n_init=10,
102
+ random_state=None,
103
+ ):
104
+ self.n_clusters = n_clusters
105
+ self.svd_method = svd_method
106
+ self.n_svd_vecs = n_svd_vecs
107
+ self.mini_batch = mini_batch
108
+ self.init = init
109
+ self.n_init = n_init
110
+ self.random_state = random_state
111
+
112
+ @abstractmethod
113
+ def _check_parameters(self, n_samples):
114
+ """Validate parameters depending on the input data."""
115
+
116
+ @_fit_context(prefer_skip_nested_validation=True)
117
+ def fit(self, X, y=None):
118
+ """Create a biclustering for X.
119
+
120
+ Parameters
121
+ ----------
122
+ X : array-like of shape (n_samples, n_features)
123
+ Training data.
124
+
125
+ y : Ignored
126
+ Not used, present for API consistency by convention.
127
+
128
+ Returns
129
+ -------
130
+ self : object
131
+ SpectralBiclustering instance.
132
+ """
133
+ X = self._validate_data(X, accept_sparse="csr", dtype=np.float64)
134
+ self._check_parameters(X.shape[0])
135
+ self._fit(X)
136
+ return self
137
+
138
+ def _svd(self, array, n_components, n_discard):
139
+ """Returns first `n_components` left and right singular
140
+ vectors u and v, discarding the first `n_discard`.
141
+ """
142
+ if self.svd_method == "randomized":
143
+ kwargs = {}
144
+ if self.n_svd_vecs is not None:
145
+ kwargs["n_oversamples"] = self.n_svd_vecs
146
+ u, _, vt = randomized_svd(
147
+ array, n_components, random_state=self.random_state, **kwargs
148
+ )
149
+
150
+ elif self.svd_method == "arpack":
151
+ u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
152
+ if np.any(np.isnan(vt)):
153
+ # some eigenvalues of A * A.T are negative, causing
154
+ # sqrt() to be np.nan. This causes some vectors in vt
155
+ # to be np.nan.
156
+ A = safe_sparse_dot(array.T, array)
157
+ random_state = check_random_state(self.random_state)
158
+ # initialize with [-1,1] as in ARPACK
159
+ v0 = random_state.uniform(-1, 1, A.shape[0])
160
+ _, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
161
+ vt = v.T
162
+ if np.any(np.isnan(u)):
163
+ A = safe_sparse_dot(array, array.T)
164
+ random_state = check_random_state(self.random_state)
165
+ # initialize with [-1,1] as in ARPACK
166
+ v0 = random_state.uniform(-1, 1, A.shape[0])
167
+ _, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
168
+
169
+ assert_all_finite(u)
170
+ assert_all_finite(vt)
171
+ u = u[:, n_discard:]
172
+ vt = vt[n_discard:]
173
+ return u, vt.T
174
+
175
+ def _k_means(self, data, n_clusters):
176
+ if self.mini_batch:
177
+ model = MiniBatchKMeans(
178
+ n_clusters,
179
+ init=self.init,
180
+ n_init=self.n_init,
181
+ random_state=self.random_state,
182
+ )
183
+ else:
184
+ model = KMeans(
185
+ n_clusters,
186
+ init=self.init,
187
+ n_init=self.n_init,
188
+ random_state=self.random_state,
189
+ )
190
+ model.fit(data)
191
+ centroid = model.cluster_centers_
192
+ labels = model.labels_
193
+ return centroid, labels
194
+
195
+ def _more_tags(self):
196
+ return {
197
+ "_xfail_checks": {
198
+ "check_estimators_dtypes": "raises nan error",
199
+ "check_fit2d_1sample": "_scale_normalize fails",
200
+ "check_fit2d_1feature": "raises apply_along_axis error",
201
+ "check_estimator_sparse_data": "does not fail gracefully",
202
+ "check_methods_subset_invariance": "empty array passed inside",
203
+ "check_dont_overwrite_parameters": "empty array passed inside",
204
+ "check_fit2d_predict1d": "empty array passed inside",
205
+ }
206
+ }
207
+
208
+
209
+ class SpectralCoclustering(BaseSpectral):
210
+ """Spectral Co-Clustering algorithm (Dhillon, 2001).
211
+
212
+ Clusters rows and columns of an array `X` to solve the relaxed
213
+ normalized cut of the bipartite graph created from `X` as follows:
214
+ the edge between row vertex `i` and column vertex `j` has weight
215
+ `X[i, j]`.
216
+
217
+ The resulting bicluster structure is block-diagonal, since each
218
+ row and each column belongs to exactly one bicluster.
219
+
220
+ Supports sparse matrices, as long as they are nonnegative.
221
+
222
+ Read more in the :ref:`User Guide <spectral_coclustering>`.
223
+
224
+ Parameters
225
+ ----------
226
+ n_clusters : int, default=3
227
+ The number of biclusters to find.
228
+
229
+ svd_method : {'randomized', 'arpack'}, default='randomized'
230
+ Selects the algorithm for finding singular vectors. May be
231
+ 'randomized' or 'arpack'. If 'randomized', use
232
+ :func:`sklearn.utils.extmath.randomized_svd`, which may be faster
233
+ for large matrices. If 'arpack', use
234
+ :func:`scipy.sparse.linalg.svds`, which is more accurate, but
235
+ possibly slower in some cases.
236
+
237
+ n_svd_vecs : int, default=None
238
+ Number of vectors to use in calculating the SVD. Corresponds
239
+ to `ncv` when `svd_method=arpack` and `n_oversamples` when
240
+ `svd_method` is 'randomized`.
241
+
242
+ mini_batch : bool, default=False
243
+ Whether to use mini-batch k-means, which is faster but may get
244
+ different results.
245
+
246
+ init : {'k-means++', 'random'}, or ndarray of shape \
247
+ (n_clusters, n_features), default='k-means++'
248
+ Method for initialization of k-means algorithm; defaults to
249
+ 'k-means++'.
250
+
251
+ n_init : int, default=10
252
+ Number of random initializations that are tried with the
253
+ k-means algorithm.
254
+
255
+ If mini-batch k-means is used, the best initialization is
256
+ chosen and the algorithm runs once. Otherwise, the algorithm
257
+ is run for each initialization and the best solution chosen.
258
+
259
+ random_state : int, RandomState instance, default=None
260
+ Used for randomizing the singular value decomposition and the k-means
261
+ initialization. Use an int to make the randomness deterministic.
262
+ See :term:`Glossary <random_state>`.
263
+
264
+ Attributes
265
+ ----------
266
+ rows_ : array-like of shape (n_row_clusters, n_rows)
267
+ Results of the clustering. `rows[i, r]` is True if
268
+ cluster `i` contains row `r`. Available only after calling ``fit``.
269
+
270
+ columns_ : array-like of shape (n_column_clusters, n_columns)
271
+ Results of the clustering, like `rows`.
272
+
273
+ row_labels_ : array-like of shape (n_rows,)
274
+ The bicluster label of each row.
275
+
276
+ column_labels_ : array-like of shape (n_cols,)
277
+ The bicluster label of each column.
278
+
279
+ biclusters_ : tuple of two ndarrays
280
+ The tuple contains the `rows_` and `columns_` arrays.
281
+
282
+ n_features_in_ : int
283
+ Number of features seen during :term:`fit`.
284
+
285
+ .. versionadded:: 0.24
286
+
287
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
288
+ Names of features seen during :term:`fit`. Defined only when `X`
289
+ has feature names that are all strings.
290
+
291
+ .. versionadded:: 1.0
292
+
293
+ See Also
294
+ --------
295
+ SpectralBiclustering : Partitions rows and columns under the assumption
296
+ that the data has an underlying checkerboard structure.
297
+
298
+ References
299
+ ----------
300
+ * :doi:`Dhillon, Inderjit S, 2001. Co-clustering documents and words using
301
+ bipartite spectral graph partitioning.
302
+ <10.1145/502512.502550>`
303
+
304
+ Examples
305
+ --------
306
+ >>> from sklearn.cluster import SpectralCoclustering
307
+ >>> import numpy as np
308
+ >>> X = np.array([[1, 1], [2, 1], [1, 0],
309
+ ... [4, 7], [3, 5], [3, 6]])
310
+ >>> clustering = SpectralCoclustering(n_clusters=2, random_state=0).fit(X)
311
+ >>> clustering.row_labels_ #doctest: +SKIP
312
+ array([0, 1, 1, 0, 0, 0], dtype=int32)
313
+ >>> clustering.column_labels_ #doctest: +SKIP
314
+ array([0, 0], dtype=int32)
315
+ >>> clustering
316
+ SpectralCoclustering(n_clusters=2, random_state=0)
317
+ """
318
+
319
+ _parameter_constraints: dict = {
320
+ **BaseSpectral._parameter_constraints,
321
+ "n_clusters": [Interval(Integral, 1, None, closed="left")],
322
+ }
323
+
324
+ def __init__(
325
+ self,
326
+ n_clusters=3,
327
+ *,
328
+ svd_method="randomized",
329
+ n_svd_vecs=None,
330
+ mini_batch=False,
331
+ init="k-means++",
332
+ n_init=10,
333
+ random_state=None,
334
+ ):
335
+ super().__init__(
336
+ n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state
337
+ )
338
+
339
+ def _check_parameters(self, n_samples):
340
+ if self.n_clusters > n_samples:
341
+ raise ValueError(
342
+ f"n_clusters should be <= n_samples={n_samples}. Got"
343
+ f" {self.n_clusters} instead."
344
+ )
345
+
346
+ def _fit(self, X):
347
+ normalized_data, row_diag, col_diag = _scale_normalize(X)
348
+ n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
349
+ u, v = self._svd(normalized_data, n_sv, n_discard=1)
350
+ z = np.vstack((row_diag[:, np.newaxis] * u, col_diag[:, np.newaxis] * v))
351
+
352
+ _, labels = self._k_means(z, self.n_clusters)
353
+
354
+ n_rows = X.shape[0]
355
+ self.row_labels_ = labels[:n_rows]
356
+ self.column_labels_ = labels[n_rows:]
357
+
358
+ self.rows_ = np.vstack([self.row_labels_ == c for c in range(self.n_clusters)])
359
+ self.columns_ = np.vstack(
360
+ [self.column_labels_ == c for c in range(self.n_clusters)]
361
+ )
362
+
363
+
364
+ class SpectralBiclustering(BaseSpectral):
365
+ """Spectral biclustering (Kluger, 2003).
366
+
367
+ Partitions rows and columns under the assumption that the data has
368
+ an underlying checkerboard structure. For instance, if there are
369
+ two row partitions and three column partitions, each row will
370
+ belong to three biclusters, and each column will belong to two
371
+ biclusters. The outer product of the corresponding row and column
372
+ label vectors gives this checkerboard structure.
373
+
374
+ Read more in the :ref:`User Guide <spectral_biclustering>`.
375
+
376
+ Parameters
377
+ ----------
378
+ n_clusters : int or tuple (n_row_clusters, n_column_clusters), default=3
379
+ The number of row and column clusters in the checkerboard
380
+ structure.
381
+
382
+ method : {'bistochastic', 'scale', 'log'}, default='bistochastic'
383
+ Method of normalizing and converting singular vectors into
384
+ biclusters. May be one of 'scale', 'bistochastic', or 'log'.
385
+ The authors recommend using 'log'. If the data is sparse,
386
+ however, log normalization will not work, which is why the
387
+ default is 'bistochastic'.
388
+
389
+ .. warning::
390
+ if `method='log'`, the data must not be sparse.
391
+
392
+ n_components : int, default=6
393
+ Number of singular vectors to check.
394
+
395
+ n_best : int, default=3
396
+ Number of best singular vectors to which to project the data
397
+ for clustering.
398
+
399
+ svd_method : {'randomized', 'arpack'}, default='randomized'
400
+ Selects the algorithm for finding singular vectors. May be
401
+ 'randomized' or 'arpack'. If 'randomized', uses
402
+ :func:`~sklearn.utils.extmath.randomized_svd`, which may be faster
403
+ for large matrices. If 'arpack', uses
404
+ `scipy.sparse.linalg.svds`, which is more accurate, but
405
+ possibly slower in some cases.
406
+
407
+ n_svd_vecs : int, default=None
408
+ Number of vectors to use in calculating the SVD. Corresponds
409
+ to `ncv` when `svd_method=arpack` and `n_oversamples` when
410
+ `svd_method` is 'randomized`.
411
+
412
+ mini_batch : bool, default=False
413
+ Whether to use mini-batch k-means, which is faster but may get
414
+ different results.
415
+
416
+ init : {'k-means++', 'random'} or ndarray of shape (n_clusters, n_features), \
417
+ default='k-means++'
418
+ Method for initialization of k-means algorithm; defaults to
419
+ 'k-means++'.
420
+
421
+ n_init : int, default=10
422
+ Number of random initializations that are tried with the
423
+ k-means algorithm.
424
+
425
+ If mini-batch k-means is used, the best initialization is
426
+ chosen and the algorithm runs once. Otherwise, the algorithm
427
+ is run for each initialization and the best solution chosen.
428
+
429
+ random_state : int, RandomState instance, default=None
430
+ Used for randomizing the singular value decomposition and the k-means
431
+ initialization. Use an int to make the randomness deterministic.
432
+ See :term:`Glossary <random_state>`.
433
+
434
+ Attributes
435
+ ----------
436
+ rows_ : array-like of shape (n_row_clusters, n_rows)
437
+ Results of the clustering. `rows[i, r]` is True if
438
+ cluster `i` contains row `r`. Available only after calling ``fit``.
439
+
440
+ columns_ : array-like of shape (n_column_clusters, n_columns)
441
+ Results of the clustering, like `rows`.
442
+
443
+ row_labels_ : array-like of shape (n_rows,)
444
+ Row partition labels.
445
+
446
+ column_labels_ : array-like of shape (n_cols,)
447
+ Column partition labels.
448
+
449
+ biclusters_ : tuple of two ndarrays
450
+ The tuple contains the `rows_` and `columns_` arrays.
451
+
452
+ n_features_in_ : int
453
+ Number of features seen during :term:`fit`.
454
+
455
+ .. versionadded:: 0.24
456
+
457
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
458
+ Names of features seen during :term:`fit`. Defined only when `X`
459
+ has feature names that are all strings.
460
+
461
+ .. versionadded:: 1.0
462
+
463
+ See Also
464
+ --------
465
+ SpectralCoclustering : Spectral Co-Clustering algorithm (Dhillon, 2001).
466
+
467
+ References
468
+ ----------
469
+
470
+ * :doi:`Kluger, Yuval, et. al., 2003. Spectral biclustering of microarray
471
+ data: coclustering genes and conditions.
472
+ <10.1101/gr.648603>`
473
+
474
+ Examples
475
+ --------
476
+ >>> from sklearn.cluster import SpectralBiclustering
477
+ >>> import numpy as np
478
+ >>> X = np.array([[1, 1], [2, 1], [1, 0],
479
+ ... [4, 7], [3, 5], [3, 6]])
480
+ >>> clustering = SpectralBiclustering(n_clusters=2, random_state=0).fit(X)
481
+ >>> clustering.row_labels_
482
+ array([1, 1, 1, 0, 0, 0], dtype=int32)
483
+ >>> clustering.column_labels_
484
+ array([1, 0], dtype=int32)
485
+ >>> clustering
486
+ SpectralBiclustering(n_clusters=2, random_state=0)
487
+ """
488
+
489
+ _parameter_constraints: dict = {
490
+ **BaseSpectral._parameter_constraints,
491
+ "n_clusters": [Interval(Integral, 1, None, closed="left"), tuple],
492
+ "method": [StrOptions({"bistochastic", "scale", "log"})],
493
+ "n_components": [Interval(Integral, 1, None, closed="left")],
494
+ "n_best": [Interval(Integral, 1, None, closed="left")],
495
+ }
496
+
497
+ def __init__(
498
+ self,
499
+ n_clusters=3,
500
+ *,
501
+ method="bistochastic",
502
+ n_components=6,
503
+ n_best=3,
504
+ svd_method="randomized",
505
+ n_svd_vecs=None,
506
+ mini_batch=False,
507
+ init="k-means++",
508
+ n_init=10,
509
+ random_state=None,
510
+ ):
511
+ super().__init__(
512
+ n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state
513
+ )
514
+ self.method = method
515
+ self.n_components = n_components
516
+ self.n_best = n_best
517
+
518
+ def _check_parameters(self, n_samples):
519
+ if isinstance(self.n_clusters, Integral):
520
+ if self.n_clusters > n_samples:
521
+ raise ValueError(
522
+ f"n_clusters should be <= n_samples={n_samples}. Got"
523
+ f" {self.n_clusters} instead."
524
+ )
525
+ else: # tuple
526
+ try:
527
+ n_row_clusters, n_column_clusters = self.n_clusters
528
+ check_scalar(
529
+ n_row_clusters,
530
+ "n_row_clusters",
531
+ target_type=Integral,
532
+ min_val=1,
533
+ max_val=n_samples,
534
+ )
535
+ check_scalar(
536
+ n_column_clusters,
537
+ "n_column_clusters",
538
+ target_type=Integral,
539
+ min_val=1,
540
+ max_val=n_samples,
541
+ )
542
+ except (ValueError, TypeError) as e:
543
+ raise ValueError(
544
+ "Incorrect parameter n_clusters has value:"
545
+ f" {self.n_clusters}. It should either be a single integer"
546
+ " or an iterable with two integers:"
547
+ " (n_row_clusters, n_column_clusters)"
548
+ " And the values are should be in the"
549
+ " range: (1, n_samples)"
550
+ ) from e
551
+
552
+ if self.n_best > self.n_components:
553
+ raise ValueError(
554
+ f"n_best={self.n_best} must be <= n_components={self.n_components}."
555
+ )
556
+
557
+ def _fit(self, X):
558
+ n_sv = self.n_components
559
+ if self.method == "bistochastic":
560
+ normalized_data = _bistochastic_normalize(X)
561
+ n_sv += 1
562
+ elif self.method == "scale":
563
+ normalized_data, _, _ = _scale_normalize(X)
564
+ n_sv += 1
565
+ elif self.method == "log":
566
+ normalized_data = _log_normalize(X)
567
+ n_discard = 0 if self.method == "log" else 1
568
+ u, v = self._svd(normalized_data, n_sv, n_discard)
569
+ ut = u.T
570
+ vt = v.T
571
+
572
+ try:
573
+ n_row_clusters, n_col_clusters = self.n_clusters
574
+ except TypeError:
575
+ n_row_clusters = n_col_clusters = self.n_clusters
576
+
577
+ best_ut = self._fit_best_piecewise(ut, self.n_best, n_row_clusters)
578
+
579
+ best_vt = self._fit_best_piecewise(vt, self.n_best, n_col_clusters)
580
+
581
+ self.row_labels_ = self._project_and_cluster(X, best_vt.T, n_row_clusters)
582
+
583
+ self.column_labels_ = self._project_and_cluster(X.T, best_ut.T, n_col_clusters)
584
+
585
+ self.rows_ = np.vstack(
586
+ [
587
+ self.row_labels_ == label
588
+ for label in range(n_row_clusters)
589
+ for _ in range(n_col_clusters)
590
+ ]
591
+ )
592
+ self.columns_ = np.vstack(
593
+ [
594
+ self.column_labels_ == label
595
+ for _ in range(n_row_clusters)
596
+ for label in range(n_col_clusters)
597
+ ]
598
+ )
599
+
600
+ def _fit_best_piecewise(self, vectors, n_best, n_clusters):
601
+ """Find the ``n_best`` vectors that are best approximated by piecewise
602
+ constant vectors.
603
+
604
+ The piecewise vectors are found by k-means; the best is chosen
605
+ according to Euclidean distance.
606
+
607
+ """
608
+
609
+ def make_piecewise(v):
610
+ centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
611
+ return centroid[labels].ravel()
612
+
613
+ piecewise_vectors = np.apply_along_axis(make_piecewise, axis=1, arr=vectors)
614
+ dists = np.apply_along_axis(norm, axis=1, arr=(vectors - piecewise_vectors))
615
+ result = vectors[np.argsort(dists)[:n_best]]
616
+ return result
617
+
618
+ def _project_and_cluster(self, data, vectors, n_clusters):
619
+ """Project ``data`` to ``vectors`` and cluster the result."""
620
+ projected = safe_sparse_dot(data, vectors)
621
+ _, labels = self._k_means(projected, n_clusters)
622
+ return labels
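For reference, a minimal end-to-end sketch of the checkerboard model described in the SpectralBiclustering docstring above, using `sklearn.datasets.make_checkerboard` (shape, noise level and seed chosen arbitrarily; not part of the file):

from sklearn.cluster import SpectralBiclustering
from sklearn.datasets import make_checkerboard

# Plant a 3 x 3 checkerboard and try to recover its row/column partitions.
data, rows, cols = make_checkerboard(
    shape=(30, 30), n_clusters=(3, 3), noise=0.5, random_state=0
)
model = SpectralBiclustering(n_clusters=(3, 3), random_state=0).fit(data)
print(model.row_labels_.shape, model.column_labels_.shape)  # (30,) (30,)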
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_birch.py ADDED
@@ -0,0 +1,741 @@
1
+ # Authors: Manoj Kumar <[email protected]>
2
+ # Alexandre Gramfort <[email protected]>
3
+ # Joel Nothman <[email protected]>
4
+ # License: BSD 3 clause
5
+
6
+ import warnings
7
+ from math import sqrt
8
+ from numbers import Integral, Real
9
+
10
+ import numpy as np
11
+ from scipy import sparse
12
+
13
+ from .._config import config_context
14
+ from ..base import (
15
+ BaseEstimator,
16
+ ClassNamePrefixFeaturesOutMixin,
17
+ ClusterMixin,
18
+ TransformerMixin,
19
+ _fit_context,
20
+ )
21
+ from ..exceptions import ConvergenceWarning
22
+ from ..metrics import pairwise_distances_argmin
23
+ from ..metrics.pairwise import euclidean_distances
24
+ from ..utils._param_validation import Interval
25
+ from ..utils.extmath import row_norms
26
+ from ..utils.validation import check_is_fitted
27
+ from . import AgglomerativeClustering
28
+
29
+
30
+ def _iterate_sparse_X(X):
31
+ """This little hack returns a densified row when iterating over a sparse
32
+ matrix, instead of constructing a sparse matrix for every row, which is
33
+ expensive.
34
+ """
35
+ n_samples = X.shape[0]
36
+ X_indices = X.indices
37
+ X_data = X.data
38
+ X_indptr = X.indptr
39
+
40
+ for i in range(n_samples):
41
+ row = np.zeros(X.shape[1])
42
+ startptr, endptr = X_indptr[i], X_indptr[i + 1]
43
+ nonzero_indices = X_indices[startptr:endptr]
44
+ row[nonzero_indices] = X_data[startptr:endptr]
45
+ yield row
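The row-densification idea implemented above can be seen on a tiny CSR matrix (illustrative only, not part of the module):

import numpy as np
from scipy import sparse

X = sparse.csr_matrix(np.array([[0.0, 2.0, 0.0], [1.0, 0.0, 3.0]]))
for i in range(X.shape[0]):
    row = np.zeros(X.shape[1])
    start, end = X.indptr[i], X.indptr[i + 1]
    row[X.indices[start:end]] = X.data[start:end]
    print(row)  # dense copy of row i: [0. 2. 0.] then [1. 0. 3.]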
46
+
47
+
48
+ def _split_node(node, threshold, branching_factor):
49
+ """The node has to be split if there is no place for a new subcluster
50
+ in the node.
51
+ 1. Two empty nodes and two empty subclusters are initialized.
52
+ 2. The pair of distant subclusters are found.
53
+ 3. The properties of the empty subclusters and nodes are updated
54
+ according to the nearest distance between the subclusters to the
55
+ pair of distant subclusters.
56
+ 4. The two nodes are set as children to the two subclusters.
57
+ """
58
+ new_subcluster1 = _CFSubcluster()
59
+ new_subcluster2 = _CFSubcluster()
60
+ new_node1 = _CFNode(
61
+ threshold=threshold,
62
+ branching_factor=branching_factor,
63
+ is_leaf=node.is_leaf,
64
+ n_features=node.n_features,
65
+ dtype=node.init_centroids_.dtype,
66
+ )
67
+ new_node2 = _CFNode(
68
+ threshold=threshold,
69
+ branching_factor=branching_factor,
70
+ is_leaf=node.is_leaf,
71
+ n_features=node.n_features,
72
+ dtype=node.init_centroids_.dtype,
73
+ )
74
+ new_subcluster1.child_ = new_node1
75
+ new_subcluster2.child_ = new_node2
76
+
77
+ if node.is_leaf:
78
+ if node.prev_leaf_ is not None:
79
+ node.prev_leaf_.next_leaf_ = new_node1
80
+ new_node1.prev_leaf_ = node.prev_leaf_
81
+ new_node1.next_leaf_ = new_node2
82
+ new_node2.prev_leaf_ = new_node1
83
+ new_node2.next_leaf_ = node.next_leaf_
84
+ if node.next_leaf_ is not None:
85
+ node.next_leaf_.prev_leaf_ = new_node2
86
+
87
+ dist = euclidean_distances(
88
+ node.centroids_, Y_norm_squared=node.squared_norm_, squared=True
89
+ )
90
+ n_clusters = dist.shape[0]
91
+
92
+ farthest_idx = np.unravel_index(dist.argmax(), (n_clusters, n_clusters))
93
+ node1_dist, node2_dist = dist[(farthest_idx,)]
94
+
95
+ node1_closer = node1_dist < node2_dist
96
+ # make sure node1 is closest to itself even if all distances are equal.
97
+ # This can only happen when all node.centroids_ are duplicates leading to all
98
+ # distances between centroids being zero.
99
+ node1_closer[farthest_idx[0]] = True
100
+
101
+ for idx, subcluster in enumerate(node.subclusters_):
102
+ if node1_closer[idx]:
103
+ new_node1.append_subcluster(subcluster)
104
+ new_subcluster1.update(subcluster)
105
+ else:
106
+ new_node2.append_subcluster(subcluster)
107
+ new_subcluster2.update(subcluster)
108
+ return new_subcluster1, new_subcluster2
109
+
110
+
111
+ class _CFNode:
112
+ """Each node in a CFTree is called a CFNode.
113
+
114
+ The CFNode can have a maximum of branching_factor
115
+ number of CFSubclusters.
116
+
117
+ Parameters
118
+ ----------
119
+ threshold : float
120
+ Threshold needed for a new subcluster to enter a CFSubcluster.
121
+
122
+ branching_factor : int
123
+ Maximum number of CF subclusters in each node.
124
+
125
+ is_leaf : bool
126
+ We need to know if the CFNode is a leaf or not, in order to
127
+ retrieve the final subclusters.
128
+
129
+ n_features : int
130
+ The number of features.
131
+
132
+ Attributes
133
+ ----------
134
+ subclusters_ : list
135
+ List of subclusters for a particular CFNode.
136
+
137
+ prev_leaf_ : _CFNode
138
+ Useful only if is_leaf is True.
139
+
140
+ next_leaf_ : _CFNode
141
+ next_leaf. Useful only if is_leaf is True, in order to retrieve
142
+ the final subclusters.
143
+
144
+ init_centroids_ : ndarray of shape (branching_factor + 1, n_features)
145
+ Manipulate ``init_centroids_`` throughout rather than centroids_ since
146
+ the centroids are just a view of the ``init_centroids_`` .
147
+
148
+ init_sq_norm_ : ndarray of shape (branching_factor + 1,)
149
+ manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
150
+
151
+ centroids_ : ndarray of shape (branching_factor + 1, n_features)
152
+ View of ``init_centroids_``.
153
+
154
+ squared_norm_ : ndarray of shape (branching_factor + 1,)
155
+ View of ``init_sq_norm_``.
156
+
157
+ """
158
+
159
+ def __init__(self, *, threshold, branching_factor, is_leaf, n_features, dtype):
160
+ self.threshold = threshold
161
+ self.branching_factor = branching_factor
162
+ self.is_leaf = is_leaf
163
+ self.n_features = n_features
164
+
165
+ # The list of subclusters, centroids and squared norms
166
+ # to manipulate throughout.
167
+ self.subclusters_ = []
168
+ self.init_centroids_ = np.zeros((branching_factor + 1, n_features), dtype=dtype)
169
+ self.init_sq_norm_ = np.zeros((branching_factor + 1), dtype)
170
+ self.squared_norm_ = []
171
+ self.prev_leaf_ = None
172
+ self.next_leaf_ = None
173
+
174
+ def append_subcluster(self, subcluster):
175
+ n_samples = len(self.subclusters_)
176
+ self.subclusters_.append(subcluster)
177
+ self.init_centroids_[n_samples] = subcluster.centroid_
178
+ self.init_sq_norm_[n_samples] = subcluster.sq_norm_
179
+
180
+ # Keep centroids and squared norm as views. In this way
181
+ # if we change init_centroids and init_sq_norm_, it is
182
+ # sufficient,
183
+ self.centroids_ = self.init_centroids_[: n_samples + 1, :]
184
+ self.squared_norm_ = self.init_sq_norm_[: n_samples + 1]
185
+
186
+ def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2):
187
+ """Remove a subcluster from a node and update it with the
188
+ split subclusters.
189
+ """
190
+ ind = self.subclusters_.index(subcluster)
191
+ self.subclusters_[ind] = new_subcluster1
192
+ self.init_centroids_[ind] = new_subcluster1.centroid_
193
+ self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
194
+ self.append_subcluster(new_subcluster2)
195
+
196
+ def insert_cf_subcluster(self, subcluster):
197
+ """Insert a new subcluster into the node."""
198
+ if not self.subclusters_:
199
+ self.append_subcluster(subcluster)
200
+ return False
201
+
202
+ threshold = self.threshold
203
+ branching_factor = self.branching_factor
204
+ # We need to find the closest subcluster among all the
205
+ # subclusters so that we can insert our new subcluster.
206
+ dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
207
+ dist_matrix *= -2.0
208
+ dist_matrix += self.squared_norm_
209
+ closest_index = np.argmin(dist_matrix)
210
+ closest_subcluster = self.subclusters_[closest_index]
211
+
212
+ # If the subcluster has a child, we need a recursive strategy.
213
+ if closest_subcluster.child_ is not None:
214
+ split_child = closest_subcluster.child_.insert_cf_subcluster(subcluster)
215
+
216
+ if not split_child:
217
+ # If it is determined that the child need not be split, we
218
+ # can just update the closest_subcluster
219
+ closest_subcluster.update(subcluster)
220
+ self.init_centroids_[closest_index] = self.subclusters_[
221
+ closest_index
222
+ ].centroid_
223
+ self.init_sq_norm_[closest_index] = self.subclusters_[
224
+ closest_index
225
+ ].sq_norm_
226
+ return False
227
+
228
+ # The child needs to be split: redistribute the subclusters in
229
+ # our child node, and add a new subcluster in the parent
230
+ # subcluster to accommodate the new child.
231
+ else:
232
+ new_subcluster1, new_subcluster2 = _split_node(
233
+ closest_subcluster.child_,
234
+ threshold,
235
+ branching_factor,
236
+ )
237
+ self.update_split_subclusters(
238
+ closest_subcluster, new_subcluster1, new_subcluster2
239
+ )
240
+
241
+ if len(self.subclusters_) > self.branching_factor:
242
+ return True
243
+ return False
244
+
245
+ # good to go!
246
+ else:
247
+ merged = closest_subcluster.merge_subcluster(subcluster, self.threshold)
248
+ if merged:
249
+ self.init_centroids_[closest_index] = closest_subcluster.centroid_
250
+ self.init_sq_norm_[closest_index] = closest_subcluster.sq_norm_
251
+ return False
252
+
253
+ # not close to any other subclusters, and we still
254
+ # have space, so add.
255
+ elif len(self.subclusters_) < self.branching_factor:
256
+ self.append_subcluster(subcluster)
257
+ return False
258
+
259
+ # We do not have enough space nor is it closer to an
260
+ # other subcluster. We need to split.
261
+ else:
262
+ self.append_subcluster(subcluster)
263
+ return True
264
+
265
+
266
+ class _CFSubcluster:
267
+ """Each subcluster in a CFNode is called a CFSubcluster.
268
+
269
+ A CFSubcluster can have a CFNode as its child.
270
+
271
+ Parameters
272
+ ----------
273
+ linear_sum : ndarray of shape (n_features,), default=None
274
+ Sample. This is kept optional to allow initialization of empty
275
+ subclusters.
276
+
277
+ Attributes
278
+ ----------
279
+ n_samples_ : int
280
+ Number of samples that belong to each subcluster.
281
+
282
+ linear_sum_ : ndarray
283
+ Linear sum of all the samples in a subcluster. Prevents holding
284
+ all sample data in memory.
285
+
286
+ squared_sum_ : float
287
+ Sum of the squared l2 norms of all samples belonging to a subcluster.
288
+
289
+ centroid_ : ndarray of shape (n_features,)
290
+ Centroid of the subcluster. Prevent recomputing of centroids when
291
+ ``CFNode.centroids_`` is called.
292
+
293
+ child_ : _CFNode
294
+ Child Node of the subcluster. Once a given _CFNode is set as the child
295
+ of the _CFNode, it is set to ``self.child_``.
296
+
297
+ sq_norm_ : float
298
+ Squared norm of the subcluster. Used to prevent recomputing when
299
+ pairwise minimum distances are computed.
300
+ """
301
+
302
+ def __init__(self, *, linear_sum=None):
303
+ if linear_sum is None:
304
+ self.n_samples_ = 0
305
+ self.squared_sum_ = 0.0
306
+ self.centroid_ = self.linear_sum_ = 0
307
+ else:
308
+ self.n_samples_ = 1
309
+ self.centroid_ = self.linear_sum_ = linear_sum
310
+ self.squared_sum_ = self.sq_norm_ = np.dot(
311
+ self.linear_sum_, self.linear_sum_
312
+ )
313
+ self.child_ = None
314
+
315
+ def update(self, subcluster):
316
+ self.n_samples_ += subcluster.n_samples_
317
+ self.linear_sum_ += subcluster.linear_sum_
318
+ self.squared_sum_ += subcluster.squared_sum_
319
+ self.centroid_ = self.linear_sum_ / self.n_samples_
320
+ self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
321
+
322
+ def merge_subcluster(self, nominee_cluster, threshold):
323
+ """Check if a cluster is worthy enough to be merged. If
324
+ yes then merge.
325
+ """
326
+ new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
327
+ new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
328
+ new_n = self.n_samples_ + nominee_cluster.n_samples_
329
+ new_centroid = (1 / new_n) * new_ls
330
+ new_sq_norm = np.dot(new_centroid, new_centroid)
331
+
332
+ # The squared radius of the cluster is defined:
333
+ # r^2 = sum_i ||x_i - c||^2 / n
334
+ # with x_i the n points assigned to the cluster and c its centroid:
335
+ # c = sum_i x_i / n
336
+ # This can be expanded to:
337
+ # r^2 = sum_i ||x_i||^2 / n - 2 < sum_i x_i / n, c> + n ||c||^2 / n
338
+ # and therefore simplifies to:
339
+ # r^2 = sum_i ||x_i||^2 / n - ||c||^2
340
+ sq_radius = new_ss / new_n - new_sq_norm
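+ # For example, merging two unit-weight points at 0 and 2 (1-D) gives
+ # new_n = 2, new_ss = 4 and centroid 1, so sq_radius = 4 / 2 - 1 = 1;
+ # the merge is accepted iff threshold >= 1.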
341
+
342
+ if sq_radius <= threshold**2:
343
+ (
344
+ self.n_samples_,
345
+ self.linear_sum_,
346
+ self.squared_sum_,
347
+ self.centroid_,
348
+ self.sq_norm_,
349
+ ) = (new_n, new_ls, new_ss, new_centroid, new_sq_norm)
350
+ return True
351
+ return False
352
+
353
+ @property
354
+ def radius(self):
355
+ """Return radius of the subcluster"""
356
+ # Because of numerical issues, this could become negative
357
+ sq_radius = self.squared_sum_ / self.n_samples_ - self.sq_norm_
358
+ return sqrt(max(0, sq_radius))
359
+
360
+
361
+ class Birch(
362
+ ClassNamePrefixFeaturesOutMixin, ClusterMixin, TransformerMixin, BaseEstimator
363
+ ):
364
+ """Implements the BIRCH clustering algorithm.
365
+
366
+ It is a memory-efficient, online-learning algorithm provided as an
367
+ alternative to :class:`MiniBatchKMeans`. It constructs a tree
368
+ data structure with the cluster centroids being read off the leaf.
369
+ These can be either the final cluster centroids or can be provided as input
370
+ to another clustering algorithm such as :class:`AgglomerativeClustering`.
371
+
372
+ Read more in the :ref:`User Guide <birch>`.
373
+
374
+ .. versionadded:: 0.16
375
+
376
+ Parameters
377
+ ----------
378
+ threshold : float, default=0.5
379
+ The radius of the subcluster obtained by merging a new sample and the
380
+ closest subcluster should be less than the threshold. Otherwise a new
381
+ subcluster is started. Setting this value to be very low promotes
382
+ splitting and vice-versa.
383
+
384
+ branching_factor : int, default=50
385
+ Maximum number of CF subclusters in each node. If a new sample enters
386
+ such that the number of subclusters exceeds the branching_factor, then
387
+ that node is split into two nodes with the subclusters redistributed
388
+ in each. The parent subcluster of that node is removed and two new
389
+ subclusters are added as parents of the 2 split nodes.
390
+
391
+ n_clusters : int, instance of sklearn.cluster model or None, default=3
392
+ Number of clusters after the final clustering step, which treats the
393
+ subclusters from the leaves as new samples.
394
+
395
+ - `None` : the final clustering step is not performed and the
396
+ subclusters are returned as they are.
397
+
398
+ - :mod:`sklearn.cluster` Estimator : If a model is provided, the model
399
+ is fit treating the subclusters as new samples and the initial data
400
+ is mapped to the label of the closest subcluster.
401
+
402
+ - `int` : the model fit is :class:`AgglomerativeClustering` with
403
+ `n_clusters` set to be equal to the int.
404
+
405
+ compute_labels : bool, default=True
406
+ Whether or not to compute labels for each fit.
407
+
408
+ copy : bool, default=True
409
+ Whether or not to make a copy of the given data. If set to False,
410
+ the initial data will be overwritten.
411
+
412
+ Attributes
413
+ ----------
414
+ root_ : _CFNode
415
+ Root of the CFTree.
416
+
417
+ dummy_leaf_ : _CFNode
418
+ Start pointer to all the leaves.
419
+
420
+ subcluster_centers_ : ndarray
421
+ Centroids of all subclusters read directly from the leaves.
422
+
423
+ subcluster_labels_ : ndarray
424
+ Labels assigned to the centroids of the subclusters after
425
+ they are clustered globally.
426
+
427
+ labels_ : ndarray of shape (n_samples,)
428
+ Array of labels assigned to the input data.
429
+ If partial_fit is used instead of fit, they are assigned to the
430
+ last batch of data.
431
+
432
+ n_features_in_ : int
433
+ Number of features seen during :term:`fit`.
434
+
435
+ .. versionadded:: 0.24
436
+
437
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
438
+ Names of features seen during :term:`fit`. Defined only when `X`
439
+ has feature names that are all strings.
440
+
441
+ .. versionadded:: 1.0
442
+
443
+ See Also
444
+ --------
445
+ MiniBatchKMeans : Alternative implementation that does incremental updates
446
+ of the centers' positions using mini-batches.
447
+
448
+ Notes
449
+ -----
450
+ The tree data structure consists of nodes with each node consisting of
451
+ a number of subclusters. The maximum number of subclusters in a node
452
+ is determined by the branching factor. Each subcluster maintains a
453
+ linear sum, squared sum and the number of samples in that subcluster.
454
+ In addition, each subcluster can also have a node as its child, if the
455
+ subcluster is not a member of a leaf node.
456
+
457
+ For a new point entering the root, it is merged with the subcluster closest
458
+ to it and the linear sum, squared sum and the number of samples of that
459
+ subcluster are updated. This is done recursively till the properties of
460
+ the leaf node are updated.
461
+
462
+ References
463
+ ----------
464
+ * Tian Zhang, Raghu Ramakrishnan, Miron Livny
465
+ BIRCH: An efficient data clustering method for large databases.
466
+ https://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
467
+
468
+ * Roberto Perdisci
469
+ JBirch - Java implementation of BIRCH clustering algorithm
470
+ https://code.google.com/archive/p/jbirch
471
+
472
+ Examples
473
+ --------
474
+ >>> from sklearn.cluster import Birch
475
+ >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
476
+ >>> brc = Birch(n_clusters=None)
477
+ >>> brc.fit(X)
478
+ Birch(n_clusters=None)
479
+ >>> brc.predict(X)
480
+ array([0, 0, 0, 1, 1, 1])
481
+ """
482
+
483
+ _parameter_constraints: dict = {
484
+ "threshold": [Interval(Real, 0.0, None, closed="neither")],
485
+ "branching_factor": [Interval(Integral, 1, None, closed="neither")],
486
+ "n_clusters": [None, ClusterMixin, Interval(Integral, 1, None, closed="left")],
487
+ "compute_labels": ["boolean"],
488
+ "copy": ["boolean"],
489
+ }
490
+
491
+ def __init__(
492
+ self,
493
+ *,
494
+ threshold=0.5,
495
+ branching_factor=50,
496
+ n_clusters=3,
497
+ compute_labels=True,
498
+ copy=True,
499
+ ):
500
+ self.threshold = threshold
501
+ self.branching_factor = branching_factor
502
+ self.n_clusters = n_clusters
503
+ self.compute_labels = compute_labels
504
+ self.copy = copy
505
+
506
+ @_fit_context(prefer_skip_nested_validation=True)
507
+ def fit(self, X, y=None):
508
+ """
509
+ Build a CF Tree for the input data.
510
+
511
+ Parameters
512
+ ----------
513
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
514
+ Input data.
515
+
516
+ y : Ignored
517
+ Not used, present here for API consistency by convention.
518
+
519
+ Returns
520
+ -------
521
+ self
522
+ Fitted estimator.
523
+ """
524
+ return self._fit(X, partial=False)
525
+
526
+ def _fit(self, X, partial):
527
+ has_root = getattr(self, "root_", None)
528
+ first_call = not (partial and has_root)
529
+
530
+ X = self._validate_data(
531
+ X,
532
+ accept_sparse="csr",
533
+ copy=self.copy,
534
+ reset=first_call,
535
+ dtype=[np.float64, np.float32],
536
+ )
537
+ threshold = self.threshold
538
+ branching_factor = self.branching_factor
539
+
540
+ n_samples, n_features = X.shape
541
+
542
+ # If partial_fit is called for the first time or fit is called, we
543
+ # start a new tree.
544
+ if first_call:
545
+ # The first root is the leaf. Manipulate this object throughout.
546
+ self.root_ = _CFNode(
547
+ threshold=threshold,
548
+ branching_factor=branching_factor,
549
+ is_leaf=True,
550
+ n_features=n_features,
551
+ dtype=X.dtype,
552
+ )
553
+
554
+ # To enable getting back subclusters.
555
+ self.dummy_leaf_ = _CFNode(
556
+ threshold=threshold,
557
+ branching_factor=branching_factor,
558
+ is_leaf=True,
559
+ n_features=n_features,
560
+ dtype=X.dtype,
561
+ )
562
+ self.dummy_leaf_.next_leaf_ = self.root_
563
+ self.root_.prev_leaf_ = self.dummy_leaf_
564
+
565
+ # Cannot vectorize. Enough to convince us to use Cython.
566
+ if not sparse.issparse(X):
567
+ iter_func = iter
568
+ else:
569
+ iter_func = _iterate_sparse_X
570
+
571
+ for sample in iter_func(X):
572
+ subcluster = _CFSubcluster(linear_sum=sample)
573
+ split = self.root_.insert_cf_subcluster(subcluster)
574
+
575
+ if split:
576
+ new_subcluster1, new_subcluster2 = _split_node(
577
+ self.root_, threshold, branching_factor
578
+ )
579
+ del self.root_
580
+ self.root_ = _CFNode(
581
+ threshold=threshold,
582
+ branching_factor=branching_factor,
583
+ is_leaf=False,
584
+ n_features=n_features,
585
+ dtype=X.dtype,
586
+ )
587
+ self.root_.append_subcluster(new_subcluster1)
588
+ self.root_.append_subcluster(new_subcluster2)
589
+
590
+ centroids = np.concatenate([leaf.centroids_ for leaf in self._get_leaves()])
591
+ self.subcluster_centers_ = centroids
592
+ self._n_features_out = self.subcluster_centers_.shape[0]
593
+
594
+ self._global_clustering(X)
595
+ return self
596
+
597
+ def _get_leaves(self):
598
+ """
599
+ Retrieve the leaves of the CF Node.
600
+
601
+ Returns
602
+ -------
603
+ leaves : list of shape (n_leaves,)
604
+ List of the leaf nodes.
605
+ """
606
+ leaf_ptr = self.dummy_leaf_.next_leaf_
607
+ leaves = []
608
+ while leaf_ptr is not None:
609
+ leaves.append(leaf_ptr)
610
+ leaf_ptr = leaf_ptr.next_leaf_
611
+ return leaves
612
+
613
+ @_fit_context(prefer_skip_nested_validation=True)
614
+ def partial_fit(self, X=None, y=None):
615
+ """
616
+ Online learning. Prevents rebuilding of CFTree from scratch.
617
+
618
+ Parameters
619
+ ----------
620
+ X : {array-like, sparse matrix} of shape (n_samples, n_features), \
621
+ default=None
622
+ Input data. If X is not provided, only the global clustering
623
+ step is done.
624
+
625
+ y : Ignored
626
+ Not used, present here for API consistency by convention.
627
+
628
+ Returns
629
+ -------
630
+ self
631
+ Fitted estimator.
632
+ """
633
+ if X is None:
634
+ # Perform just the final global clustering step.
635
+ self._global_clustering()
636
+ return self
637
+ else:
638
+ return self._fit(X, partial=True)
639
+
640
+ def _check_fit(self, X):
641
+ check_is_fitted(self)
642
+
643
+ if (
644
+ hasattr(self, "subcluster_centers_")
645
+ and X.shape[1] != self.subcluster_centers_.shape[1]
646
+ ):
647
+ raise ValueError(
648
+ "Training data and predicted data do not have same number of features."
649
+ )
650
+
651
+ def predict(self, X):
652
+ """
653
+ Predict data using the ``centroids_`` of subclusters.
654
+
655
+ Avoid computation of the row norms of X.
656
+
657
+ Parameters
658
+ ----------
659
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
660
+ Input data.
661
+
662
+ Returns
663
+ -------
664
+ labels : ndarray of shape(n_samples,)
665
+ Labelled data.
666
+ """
667
+ check_is_fitted(self)
668
+ X = self._validate_data(X, accept_sparse="csr", reset=False)
669
+ return self._predict(X)
670
+
671
+ def _predict(self, X):
672
+ """Predict data using the ``centroids_`` of subclusters."""
673
+ kwargs = {"Y_norm_squared": self._subcluster_norms}
674
+
675
+ with config_context(assume_finite=True):
676
+ argmin = pairwise_distances_argmin(
677
+ X, self.subcluster_centers_, metric_kwargs=kwargs
678
+ )
679
+ return self.subcluster_labels_[argmin]
680
+
681
+ def transform(self, X):
682
+ """
683
+ Transform X into subcluster centroids dimension.
684
+
685
+ Each dimension represents the distance from the sample point to each
686
+ cluster centroid.
687
+
688
+ Parameters
689
+ ----------
690
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
691
+ Input data.
692
+
693
+ Returns
694
+ -------
695
+ X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters)
696
+ Transformed data.
697
+ """
698
+ check_is_fitted(self)
699
+ X = self._validate_data(X, accept_sparse="csr", reset=False)
700
+ with config_context(assume_finite=True):
701
+ return euclidean_distances(X, self.subcluster_centers_)
702
+
703
+ def _global_clustering(self, X=None):
704
+ """
705
+ Global clustering for the subclusters obtained after fitting
706
+ """
707
+ clusterer = self.n_clusters
708
+ centroids = self.subcluster_centers_
709
+ compute_labels = (X is not None) and self.compute_labels
710
+
711
+ # Preprocessing for the global clustering.
712
+ not_enough_centroids = False
713
+ if isinstance(clusterer, Integral):
714
+ clusterer = AgglomerativeClustering(n_clusters=self.n_clusters)
715
+ # There is no need to perform the global clustering step.
716
+ if len(centroids) < self.n_clusters:
717
+ not_enough_centroids = True
718
+
719
+ # To use in predict to avoid recalculation.
720
+ self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True)
721
+
722
+ if clusterer is None or not_enough_centroids:
723
+ self.subcluster_labels_ = np.arange(len(centroids))
724
+ if not_enough_centroids:
725
+ warnings.warn(
726
+ "Number of subclusters found (%d) by BIRCH is less "
727
+ "than (%d). Decrease the threshold."
728
+ % (len(centroids), self.n_clusters),
729
+ ConvergenceWarning,
730
+ )
731
+ else:
732
+ # The global clustering step that clusters the subclusters of
733
+ # the leaves. It assumes the centroids of the subclusters as
734
+ # samples and finds the final centroids.
735
+ self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_)
736
+
737
+ if compute_labels:
738
+ self.labels_ = self._predict(X)
739
+
740
+ def _more_tags(self):
741
+ return {"preserves_dtype": [np.float64, np.float32]}
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_bisect_k_means.py ADDED
@@ -0,0 +1,529 @@
1
+ """Bisecting K-means clustering."""
2
+ # Author: Michal Krawczyk <[email protected]>
3
+
4
+ import warnings
5
+
6
+ import numpy as np
7
+ import scipy.sparse as sp
8
+
9
+ from ..base import _fit_context
10
+ from ..utils._openmp_helpers import _openmp_effective_n_threads
11
+ from ..utils._param_validation import Integral, Interval, StrOptions
12
+ from ..utils.extmath import row_norms
13
+ from ..utils.validation import _check_sample_weight, check_is_fitted, check_random_state
14
+ from ._k_means_common import _inertia_dense, _inertia_sparse
15
+ from ._kmeans import (
16
+ _BaseKMeans,
17
+ _kmeans_single_elkan,
18
+ _kmeans_single_lloyd,
19
+ _labels_inertia_threadpool_limit,
20
+ )
21
+
22
+
23
+ class _BisectingTree:
24
+ """Tree structure representing the hierarchical clusters of BisectingKMeans."""
25
+
26
+ def __init__(self, center, indices, score):
27
+ """Create a new cluster node in the tree.
28
+
29
+ The node holds the center of this cluster and the indices of the data points
30
+ that belong to it.
31
+ """
32
+ self.center = center
33
+ self.indices = indices
34
+ self.score = score
35
+
36
+ self.left = None
37
+ self.right = None
38
+
39
+ def split(self, labels, centers, scores):
40
+ """Split the cluster node into two subclusters."""
41
+ self.left = _BisectingTree(
42
+ indices=self.indices[labels == 0], center=centers[0], score=scores[0]
43
+ )
44
+ self.right = _BisectingTree(
45
+ indices=self.indices[labels == 1], center=centers[1], score=scores[1]
46
+ )
47
+
48
+ # reset the indices attribute to save memory
49
+ self.indices = None
50
+
51
+ def get_cluster_to_bisect(self):
52
+ """Return the cluster node to bisect next.
53
+
54
+ It's based on the score of the cluster, which can be either the number of
55
+ data points assigned to that cluster or the inertia of that cluster
56
+ (see `bisecting_strategy` for details).
57
+ """
58
+ max_score = None
59
+
60
+ for cluster_leaf in self.iter_leaves():
61
+ if max_score is None or cluster_leaf.score > max_score:
62
+ max_score = cluster_leaf.score
63
+ best_cluster_leaf = cluster_leaf
64
+
65
+ return best_cluster_leaf
66
+
67
+ def iter_leaves(self):
68
+ """Iterate over all the cluster leaves in the tree."""
69
+ if self.left is None:
70
+ yield self
71
+ else:
72
+ yield from self.left.iter_leaves()
73
+ yield from self.right.iter_leaves()
74
+
75
+
76
+ class BisectingKMeans(_BaseKMeans):
77
+ """Bisecting K-Means clustering.
78
+
79
+ Read more in the :ref:`User Guide <bisect_k_means>`.
80
+
81
+ .. versionadded:: 1.1
82
+
83
+ Parameters
84
+ ----------
85
+ n_clusters : int, default=8
86
+ The number of clusters to form as well as the number of
87
+ centroids to generate.
88
+
89
+ init : {'k-means++', 'random'} or callable, default='random'
90
+ Method for initialization:
91
+
92
+ 'k-means++' : selects initial cluster centers for k-means
93
+ clustering in a smart way to speed up convergence. See section
94
+ Notes in k_init for more details.
95
+
96
+ 'random': choose `n_clusters` observations (rows) at random from data
97
+ for the initial centroids.
98
+
99
+ If a callable is passed, it should take arguments X, n_clusters and a
100
+ random state and return an initialization.
101
+
102
+ n_init : int, default=1
103
+ Number of times the inner k-means algorithm will be run with different
104
+ centroid seeds in each bisection.
105
+ For each bisection, the best output of n_init
106
+ consecutive runs in terms of inertia is kept.
107
+
108
+ random_state : int, RandomState instance or None, default=None
109
+ Determines random number generation for centroid initialization
110
+ in inner K-Means. Use an int to make the randomness deterministic.
111
+ See :term:`Glossary <random_state>`.
112
+
113
+ max_iter : int, default=300
114
+ Maximum number of iterations of the inner k-means algorithm at each
115
+ bisection.
116
+
117
+ verbose : int, default=0
118
+ Verbosity mode.
119
+
120
+ tol : float, default=1e-4
121
+ Relative tolerance with regards to Frobenius norm of the difference
122
+ in the cluster centers of two consecutive iterations to declare
123
+ convergence. Used in inner k-means algorithm at each bisection to pick
124
+ best possible clusters.
125
+
126
+ copy_x : bool, default=True
127
+ When pre-computing distances it is more numerically accurate to center
128
+ the data first. If copy_x is True (default), then the original data is
129
+ not modified. If False, the original data is modified, and put back
130
+ before the function returns, but small numerical differences may be
131
+ introduced by subtracting and then adding the data mean. Note that if
132
+ the original data is not C-contiguous, a copy will be made even if
133
+ copy_x is False. If the original data is sparse, but not in CSR format,
134
+ a copy will be made even if copy_x is False.
135
+
136
+ algorithm : {"lloyd", "elkan"}, default="lloyd"
137
+ Inner K-means algorithm used in bisection.
138
+ The classical EM-style algorithm is `"lloyd"`.
139
+ The `"elkan"` variation can be more efficient on some datasets with
140
+ well-defined clusters, by using the triangle inequality. However it's
141
+ more memory intensive due to the allocation of an extra array of shape
142
+ `(n_samples, n_clusters)`.
143
+
144
+ bisecting_strategy : {"biggest_inertia", "largest_cluster"},\
145
+ default="biggest_inertia"
146
+ Defines how bisection should be performed:
147
+
148
+ - "biggest_inertia" means that BisectingKMeans will always check
149
+ all calculated clusters for the cluster with the biggest SSE
150
+ (Sum of squared errors) and bisect it. This approach concentrates on
151
+ precision, but may be costly in terms of execution time (especially for
152
+ larger amounts of data points).
153
+
154
+ - "largest_cluster" - BisectingKMeans will always split cluster with
155
+ largest amount of points assigned to it from all clusters
156
+ previously calculated. That should work faster than picking by SSE
157
+ ('biggest_inertia') and may produce similar results in most cases.
158
+
159
+ Attributes
160
+ ----------
161
+ cluster_centers_ : ndarray of shape (n_clusters, n_features)
162
+ Coordinates of cluster centers. If the algorithm stops before fully
163
+ converging (see ``tol`` and ``max_iter``), these will not be
164
+ consistent with ``labels_``.
165
+
166
+ labels_ : ndarray of shape (n_samples,)
167
+ Labels of each point.
168
+
169
+ inertia_ : float
170
+ Sum of squared distances of samples to their closest cluster center,
171
+ weighted by the sample weights if provided.
172
+
173
+ n_features_in_ : int
174
+ Number of features seen during :term:`fit`.
175
+
176
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
177
+ Names of features seen during :term:`fit`. Defined only when `X`
178
+ has feature names that are all strings.
179
+
180
+ See Also
181
+ --------
182
+ KMeans : Original implementation of K-Means algorithm.
183
+
184
+ Notes
185
+ -----
186
+ It might be inefficient when n_clusters is less than 3, due to unnecessary
187
+ calculations for that case.
188
+
189
+ Examples
190
+ --------
191
+ >>> from sklearn.cluster import BisectingKMeans
192
+ >>> import numpy as np
193
+ >>> X = np.array([[1, 1], [10, 1], [3, 1],
194
+ ... [10, 0], [2, 1], [10, 2],
195
+ ... [10, 8], [10, 9], [10, 10]])
196
+ >>> bisect_means = BisectingKMeans(n_clusters=3, random_state=0).fit(X)
197
+ >>> bisect_means.labels_
198
+ array([0, 2, 0, 2, 0, 2, 1, 1, 1], dtype=int32)
199
+ >>> bisect_means.predict([[0, 0], [12, 3]])
200
+ array([0, 2], dtype=int32)
201
+ >>> bisect_means.cluster_centers_
202
+ array([[ 2., 1.],
203
+ [10., 9.],
204
+ [10., 1.]])
205
+ """
206
+
207
+ _parameter_constraints: dict = {
208
+ **_BaseKMeans._parameter_constraints,
209
+ "init": [StrOptions({"k-means++", "random"}), callable],
210
+ "n_init": [Interval(Integral, 1, None, closed="left")],
211
+ "copy_x": ["boolean"],
212
+ "algorithm": [StrOptions({"lloyd", "elkan"})],
213
+ "bisecting_strategy": [StrOptions({"biggest_inertia", "largest_cluster"})],
214
+ }
215
+
216
+ def __init__(
217
+ self,
218
+ n_clusters=8,
219
+ *,
220
+ init="random",
221
+ n_init=1,
222
+ random_state=None,
223
+ max_iter=300,
224
+ verbose=0,
225
+ tol=1e-4,
226
+ copy_x=True,
227
+ algorithm="lloyd",
228
+ bisecting_strategy="biggest_inertia",
229
+ ):
230
+ super().__init__(
231
+ n_clusters=n_clusters,
232
+ init=init,
233
+ max_iter=max_iter,
234
+ verbose=verbose,
235
+ random_state=random_state,
236
+ tol=tol,
237
+ n_init=n_init,
238
+ )
239
+
240
+ self.copy_x = copy_x
241
+ self.algorithm = algorithm
242
+ self.bisecting_strategy = bisecting_strategy
243
+
244
+ def _warn_mkl_vcomp(self, n_active_threads):
245
+ """Warn when vcomp and mkl are both present"""
246
+ warnings.warn(
247
+ "BisectingKMeans is known to have a memory leak on Windows "
248
+ "with MKL, when there are less chunks than available "
249
+ "threads. You can avoid it by setting the environment"
250
+ f" variable OMP_NUM_THREADS={n_active_threads}."
251
+ )
252
+
253
+ def _inertia_per_cluster(self, X, centers, labels, sample_weight):
254
+ """Calculate the sum of squared errors (inertia) per cluster.
255
+
256
+ Parameters
257
+ ----------
258
+ X : {ndarray, csr_matrix} of shape (n_samples, n_features)
259
+ The input samples.
260
+
261
+ centers : ndarray of shape (n_clusters=2, n_features)
262
+ The cluster centers.
263
+
264
+ labels : ndarray of shape (n_samples,)
265
+ Index of the cluster each sample belongs to.
266
+
267
+ sample_weight : ndarray of shape (n_samples,)
268
+ The weights for each observation in X.
269
+
270
+ Returns
271
+ -------
272
+ inertia_per_cluster : ndarray of shape (n_clusters=2,)
273
+ Sum of squared errors (inertia) for each cluster.
274
+ """
275
+ n_clusters = centers.shape[0] # = 2 since centers comes from a bisection
276
+ _inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense
277
+
278
+ inertia_per_cluster = np.empty(n_clusters)
279
+ for label in range(n_clusters):
280
+ inertia_per_cluster[label] = _inertia(
281
+ X, sample_weight, centers, labels, self._n_threads, single_label=label
282
+ )
283
+
284
+ return inertia_per_cluster
285
+
286
+ def _bisect(self, X, x_squared_norms, sample_weight, cluster_to_bisect):
287
+ """Split a cluster into 2 subsclusters.
288
+
289
+ Parameters
290
+ ----------
291
+ X : {ndarray, csr_matrix} of shape (n_samples, n_features)
292
+ Training instances to cluster.
293
+
294
+ x_squared_norms : ndarray of shape (n_samples,)
295
+ Squared euclidean norm of each data point.
296
+
297
+ sample_weight : ndarray of shape (n_samples,)
298
+ The weights for each observation in X.
299
+
300
+ cluster_to_bisect : _BisectingTree node object
301
+ The cluster node to split.
302
+ """
303
+ X = X[cluster_to_bisect.indices]
304
+ x_squared_norms = x_squared_norms[cluster_to_bisect.indices]
305
+ sample_weight = sample_weight[cluster_to_bisect.indices]
306
+
307
+ best_inertia = None
308
+
309
+ # Split samples in X into 2 clusters.
310
+ # Repeating `n_init` times to obtain best clusters
311
+ for _ in range(self.n_init):
312
+ centers_init = self._init_centroids(
313
+ X,
314
+ x_squared_norms=x_squared_norms,
315
+ init=self.init,
316
+ random_state=self._random_state,
317
+ n_centroids=2,
318
+ sample_weight=sample_weight,
319
+ )
320
+
321
+ labels, inertia, centers, _ = self._kmeans_single(
322
+ X,
323
+ sample_weight,
324
+ centers_init,
325
+ max_iter=self.max_iter,
326
+ verbose=self.verbose,
327
+ tol=self.tol,
328
+ n_threads=self._n_threads,
329
+ )
330
+
331
+ # allow small tolerance on the inertia to account for
332
+ # non-deterministic rounding errors due to parallel computation
333
+ if best_inertia is None or inertia < best_inertia * (1 - 1e-6):
334
+ best_labels = labels
335
+ best_centers = centers
336
+ best_inertia = inertia
337
+
338
+ if self.verbose:
339
+ print(f"New centroids from bisection: {best_centers}")
340
+
341
+ if self.bisecting_strategy == "biggest_inertia":
342
+ scores = self._inertia_per_cluster(
343
+ X, best_centers, best_labels, sample_weight
344
+ )
345
+ else: # bisecting_strategy == "largest_cluster"
346
+ # Using minlength to make sure that we have the counts for both labels even
347
+ # if all samples are labelled 0.
348
+ scores = np.bincount(best_labels, minlength=2)
349
+
350
+ cluster_to_bisect.split(best_labels, best_centers, scores)
351
+
352
+ @_fit_context(prefer_skip_nested_validation=True)
353
+ def fit(self, X, y=None, sample_weight=None):
354
+ """Compute bisecting k-means clustering.
355
+
356
+ Parameters
357
+ ----------
358
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
359
+
360
+ Training instances to cluster.
361
+
362
+ .. note:: The data will be converted to C ordering,
363
+ which will cause a memory copy
364
+ if the given data is not C-contiguous.
365
+
366
+ y : Ignored
367
+ Not used, present here for API consistency by convention.
368
+
369
+ sample_weight : array-like of shape (n_samples,), default=None
370
+ The weights for each observation in X. If None, all observations
371
+ are assigned equal weight. `sample_weight` is not used during
372
+ initialization if `init` is a callable.
373
+
374
+ Returns
375
+ -------
376
+ self
377
+ Fitted estimator.
378
+ """
379
+ X = self._validate_data(
380
+ X,
381
+ accept_sparse="csr",
382
+ dtype=[np.float64, np.float32],
383
+ order="C",
384
+ copy=self.copy_x,
385
+ accept_large_sparse=False,
386
+ )
387
+
388
+ self._check_params_vs_input(X)
389
+
390
+ self._random_state = check_random_state(self.random_state)
391
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
392
+ self._n_threads = _openmp_effective_n_threads()
393
+
394
+ if self.algorithm == "lloyd" or self.n_clusters == 1:
395
+ self._kmeans_single = _kmeans_single_lloyd
396
+ self._check_mkl_vcomp(X, X.shape[0])
397
+ else:
398
+ self._kmeans_single = _kmeans_single_elkan
399
+
400
+ # Subtract the mean of X for more accurate distance computations
401
+ if not sp.issparse(X):
402
+ self._X_mean = X.mean(axis=0)
403
+ X -= self._X_mean
404
+
405
+ # Initialize the hierarchical clusters tree
406
+ self._bisecting_tree = _BisectingTree(
407
+ indices=np.arange(X.shape[0]),
408
+ center=X.mean(axis=0),
409
+ score=0,
410
+ )
411
+
412
+ x_squared_norms = row_norms(X, squared=True)
413
+
414
+ for _ in range(self.n_clusters - 1):
415
+ # Choose the cluster to bisect
416
+ cluster_to_bisect = self._bisecting_tree.get_cluster_to_bisect()
417
+
418
+ # Split this cluster into 2 subclusters
419
+ self._bisect(X, x_squared_norms, sample_weight, cluster_to_bisect)
420
+
421
+ # Aggregate final labels and centers from the bisecting tree
422
+ self.labels_ = np.full(X.shape[0], -1, dtype=np.int32)
423
+ self.cluster_centers_ = np.empty((self.n_clusters, X.shape[1]), dtype=X.dtype)
424
+
425
+ for i, cluster_node in enumerate(self._bisecting_tree.iter_leaves()):
426
+ self.labels_[cluster_node.indices] = i
427
+ self.cluster_centers_[i] = cluster_node.center
428
+ cluster_node.label = i # label final clusters for future prediction
429
+ cluster_node.indices = None # release memory
430
+
431
+ # Restore original data
432
+ if not sp.issparse(X):
433
+ X += self._X_mean
434
+ self.cluster_centers_ += self._X_mean
435
+
436
+ _inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense
437
+ self.inertia_ = _inertia(
438
+ X, sample_weight, self.cluster_centers_, self.labels_, self._n_threads
439
+ )
440
+
441
+ self._n_features_out = self.cluster_centers_.shape[0]
442
+
443
+ return self
444
+
445
+ def predict(self, X):
446
+ """Predict which cluster each sample in X belongs to.
447
+
448
+ Prediction is made by going down the hierarchical tree
449
+ in search of the closest leaf cluster.
450
+
451
+ In the vector quantization literature, `cluster_centers_` is called
452
+ the code book and each value returned by `predict` is the index of
453
+ the closest code in the code book.
454
+
455
+ Parameters
456
+ ----------
457
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
458
+ New data to predict.
459
+
460
+ Returns
461
+ -------
462
+ labels : ndarray of shape (n_samples,)
463
+ Index of the cluster each sample belongs to.
464
+ """
465
+ check_is_fitted(self)
466
+
467
+ X = self._check_test_data(X)
468
+ x_squared_norms = row_norms(X, squared=True)
469
+
470
+ # sample weights are unused but necessary in cython helpers
471
+ sample_weight = np.ones_like(x_squared_norms)
472
+
473
+ labels = self._predict_recursive(X, sample_weight, self._bisecting_tree)
474
+
475
+ return labels
476
+
477
+ def _predict_recursive(self, X, sample_weight, cluster_node):
478
+ """Predict recursively by going down the hierarchical tree.
479
+
480
+ Parameters
481
+ ----------
482
+ X : {ndarray, csr_matrix} of shape (n_samples, n_features)
483
+ The data points, currently assigned to `cluster_node`, to predict between
484
+ the subclusters of this node.
485
+
486
+ sample_weight : ndarray of shape (n_samples,)
487
+ The weights for each observation in X.
488
+
489
+ cluster_node : _BisectingTree node object
490
+ The cluster node of the hierarchical tree.
491
+
492
+ Returns
493
+ -------
494
+ labels : ndarray of shape (n_samples,)
495
+ Index of the cluster each sample belongs to.
496
+ """
497
+ if cluster_node.left is None:
498
+ # This cluster has no subcluster. Labels are just the label of the cluster.
499
+ return np.full(X.shape[0], cluster_node.label, dtype=np.int32)
500
+
501
+ # Determine if data points belong to the left or right subcluster
502
+ centers = np.vstack((cluster_node.left.center, cluster_node.right.center))
503
+ if hasattr(self, "_X_mean"):
504
+ centers += self._X_mean
505
+
506
+ cluster_labels = _labels_inertia_threadpool_limit(
507
+ X,
508
+ sample_weight,
509
+ centers,
510
+ self._n_threads,
511
+ return_inertia=False,
512
+ )
513
+ mask = cluster_labels == 0
514
+
515
+ # Compute the labels for each subset of the data points.
516
+ labels = np.full(X.shape[0], -1, dtype=np.int32)
517
+
518
+ labels[mask] = self._predict_recursive(
519
+ X[mask], sample_weight[mask], cluster_node.left
520
+ )
521
+
522
+ labels[~mask] = self._predict_recursive(
523
+ X[~mask], sample_weight[~mask], cluster_node.right
524
+ )
525
+
526
+ return labels
527
+
528
+ def _more_tags(self):
529
+ return {"preserves_dtype": [np.float64, np.float32]}
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_dbscan.py ADDED
@@ -0,0 +1,476 @@
1
+ """
2
+ DBSCAN: Density-Based Spatial Clustering of Applications with Noise
3
+ """
4
+
5
+ # Author: Robert Layton <[email protected]>
6
+ # Joel Nothman <[email protected]>
7
+ # Lars Buitinck
8
+ #
9
+ # License: BSD 3 clause
10
+
11
+ import warnings
12
+ from numbers import Integral, Real
13
+
14
+ import numpy as np
15
+ from scipy import sparse
16
+
17
+ from ..base import BaseEstimator, ClusterMixin, _fit_context
18
+ from ..metrics.pairwise import _VALID_METRICS
19
+ from ..neighbors import NearestNeighbors
20
+ from ..utils._param_validation import Interval, StrOptions, validate_params
21
+ from ..utils.validation import _check_sample_weight
22
+ from ._dbscan_inner import dbscan_inner
23
+
24
+
25
+ @validate_params(
26
+ {
27
+ "X": ["array-like", "sparse matrix"],
28
+ "sample_weight": ["array-like", None],
29
+ },
30
+ prefer_skip_nested_validation=False,
31
+ )
32
+ def dbscan(
33
+ X,
34
+ eps=0.5,
35
+ *,
36
+ min_samples=5,
37
+ metric="minkowski",
38
+ metric_params=None,
39
+ algorithm="auto",
40
+ leaf_size=30,
41
+ p=2,
42
+ sample_weight=None,
43
+ n_jobs=None,
44
+ ):
45
+ """Perform DBSCAN clustering from vector array or distance matrix.
46
+
47
+ Read more in the :ref:`User Guide <dbscan>`.
48
+
49
+ Parameters
50
+ ----------
51
+ X : {array-like, sparse (CSR) matrix} of shape (n_samples, n_features) or \
52
+ (n_samples, n_samples)
53
+ A feature array, or array of distances between samples if
54
+ ``metric='precomputed'``.
55
+
56
+ eps : float, default=0.5
57
+ The maximum distance between two samples for one to be considered
58
+ as in the neighborhood of the other. This is not a maximum bound
59
+ on the distances of points within a cluster. This is the most
60
+ important DBSCAN parameter to choose appropriately for your data set
61
+ and distance function.
62
+
63
+ min_samples : int, default=5
64
+ The number of samples (or total weight) in a neighborhood for a point
65
+ to be considered as a core point. This includes the point itself.
66
+
67
+ metric : str or callable, default='minkowski'
68
+ The metric to use when calculating distance between instances in a
69
+ feature array. If metric is a string or callable, it must be one of
70
+ the options allowed by :func:`sklearn.metrics.pairwise_distances` for
71
+ its metric parameter.
72
+ If metric is "precomputed", X is assumed to be a distance matrix and
73
+ must be square during fit.
74
+ X may be a :term:`sparse graph <sparse graph>`,
75
+ in which case only "nonzero" elements may be considered neighbors.
76
+
77
+ metric_params : dict, default=None
78
+ Additional keyword arguments for the metric function.
79
+
80
+ .. versionadded:: 0.19
81
+
82
+ algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
83
+ The algorithm to be used by the NearestNeighbors module
84
+ to compute pointwise distances and find nearest neighbors.
85
+ See NearestNeighbors module documentation for details.
86
+
87
+ leaf_size : int, default=30
88
+ Leaf size passed to BallTree or cKDTree. This can affect the speed
89
+ of the construction and query, as well as the memory required
90
+ to store the tree. The optimal value depends
91
+ on the nature of the problem.
92
+
93
+ p : float, default=2
94
+ The power of the Minkowski metric to be used to calculate distance
95
+ between points.
96
+
97
+ sample_weight : array-like of shape (n_samples,), default=None
98
+ Weight of each sample, such that a sample with a weight of at least
99
+ ``min_samples`` is by itself a core sample; a sample with negative
100
+ weight may inhibit its eps-neighbor from being core.
101
+ Note that weights are absolute, and default to 1.
102
+
103
+ n_jobs : int, default=None
104
+ The number of parallel jobs to run for neighbors search. ``None`` means
105
+ 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means
106
+ using all processors. See :term:`Glossary <n_jobs>` for more details.
107
+ If precomputed distance are used, parallel execution is not available
108
+ and thus n_jobs will have no effect.
109
+
110
+ Returns
111
+ -------
112
+ core_samples : ndarray of shape (n_core_samples,)
113
+ Indices of core samples.
114
+
115
+ labels : ndarray of shape (n_samples,)
116
+ Cluster labels for each point. Noisy samples are given the label -1.
117
+
118
+ See Also
119
+ --------
120
+ DBSCAN : An estimator interface for this clustering algorithm.
121
+ OPTICS : A similar estimator interface clustering at multiple values of
122
+ eps. Our implementation is optimized for memory usage.
123
+
124
+ Notes
125
+ -----
126
+ For an example, see :ref:`examples/cluster/plot_dbscan.py
127
+ <sphx_glr_auto_examples_cluster_plot_dbscan.py>`.
128
+
129
+ This implementation bulk-computes all neighborhood queries, which increases
130
+ the memory complexity to O(n.d) where d is the average number of neighbors,
131
+ while original DBSCAN had memory complexity O(n). It may attract a higher
132
+ memory complexity when querying these nearest neighborhoods, depending
133
+ on the ``algorithm``.
134
+
135
+ One way to avoid the query complexity is to pre-compute sparse
136
+ neighborhoods in chunks using
137
+ :func:`NearestNeighbors.radius_neighbors_graph
138
+ <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
139
+ ``mode='distance'``, then using ``metric='precomputed'`` here.
140
+
141
+ Another way to reduce memory and computation time is to remove
142
+ (near-)duplicate points and use ``sample_weight`` instead.
143
+
144
+ :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower
145
+ memory usage.
146
+
147
+ References
148
+ ----------
149
+ Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based
150
+ Algorithm for Discovering Clusters in Large Spatial Databases with Noise"
151
+ <https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_.
152
+ In: Proceedings of the 2nd International Conference on Knowledge Discovery
153
+ and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
154
+
155
+ Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017).
156
+ :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN."
157
+ <10.1145/3068335>`
158
+ ACM Transactions on Database Systems (TODS), 42(3), 19.
159
+
160
+ Examples
161
+ --------
162
+ >>> from sklearn.cluster import dbscan
163
+ >>> X = [[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]]
164
+ >>> core_samples, labels = dbscan(X, eps=3, min_samples=2)
165
+ >>> core_samples
166
+ array([0, 1, 2, 3, 4])
167
+ >>> labels
168
+ array([ 0, 0, 0, 1, 1, -1])
169
+ """
170
+
171
+ est = DBSCAN(
172
+ eps=eps,
173
+ min_samples=min_samples,
174
+ metric=metric,
175
+ metric_params=metric_params,
176
+ algorithm=algorithm,
177
+ leaf_size=leaf_size,
178
+ p=p,
179
+ n_jobs=n_jobs,
180
+ )
181
+ est.fit(X, sample_weight=sample_weight)
182
+ return est.core_sample_indices_, est.labels_
183
+
184
+
185
+ class DBSCAN(ClusterMixin, BaseEstimator):
186
+ """Perform DBSCAN clustering from vector array or distance matrix.
187
+
188
+ DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
189
+ Finds core samples of high density and expands clusters from them.
190
+ Good for data which contains clusters of similar density.
191
+
192
+ The worst case memory complexity of DBSCAN is :math:`O({n}^2)`, which can
193
+ occur when the `eps` param is large and `min_samples` is low.
194
+
195
+ Read more in the :ref:`User Guide <dbscan>`.
196
+
197
+ Parameters
198
+ ----------
199
+ eps : float, default=0.5
200
+ The maximum distance between two samples for one to be considered
201
+ as in the neighborhood of the other. This is not a maximum bound
202
+ on the distances of points within a cluster. This is the most
203
+ important DBSCAN parameter to choose appropriately for your data set
204
+ and distance function.
205
+
206
+ min_samples : int, default=5
207
+ The number of samples (or total weight) in a neighborhood for a point to
208
+ be considered as a core point. This includes the point itself. If
209
+ `min_samples` is set to a higher value, DBSCAN will find denser clusters,
210
+ whereas if it is set to a lower value, the found clusters will be more
211
+ sparse.
212
+
213
+ metric : str, or callable, default='euclidean'
214
+ The metric to use when calculating distance between instances in a
215
+ feature array. If metric is a string or callable, it must be one of
216
+ the options allowed by :func:`sklearn.metrics.pairwise_distances` for
217
+ its metric parameter.
218
+ If metric is "precomputed", X is assumed to be a distance matrix and
219
+ must be square. X may be a :term:`sparse graph`, in which
220
+ case only "nonzero" elements may be considered neighbors for DBSCAN.
221
+
222
+ .. versionadded:: 0.17
223
+ metric *precomputed* to accept precomputed sparse matrix.
224
+
225
+ metric_params : dict, default=None
226
+ Additional keyword arguments for the metric function.
227
+
228
+ .. versionadded:: 0.19
229
+
230
+ algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
231
+ The algorithm to be used by the NearestNeighbors module
232
+ to compute pointwise distances and find nearest neighbors.
233
+ See NearestNeighbors module documentation for details.
234
+
235
+ leaf_size : int, default=30
236
+ Leaf size passed to BallTree or cKDTree. This can affect the speed
237
+ of the construction and query, as well as the memory required
238
+ to store the tree. The optimal value depends
239
+ on the nature of the problem.
240
+
241
+ p : float, default=None
242
+ The power of the Minkowski metric to be used to calculate distance
243
+ between points. If None, then ``p=2`` (equivalent to the Euclidean
244
+ distance).
245
+
246
+ n_jobs : int, default=None
247
+ The number of parallel jobs to run.
248
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
249
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
250
+ for more details.
251
+
252
+ Attributes
253
+ ----------
254
+ core_sample_indices_ : ndarray of shape (n_core_samples,)
255
+ Indices of core samples.
256
+
257
+ components_ : ndarray of shape (n_core_samples, n_features)
258
+ Copy of each core sample found by training.
259
+
260
+ labels_ : ndarray of shape (n_samples)
261
+ Cluster labels for each point in the dataset given to fit().
262
+ Noisy samples are given the label -1.
263
+
264
+ n_features_in_ : int
265
+ Number of features seen during :term:`fit`.
266
+
267
+ .. versionadded:: 0.24
268
+
269
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
270
+ Names of features seen during :term:`fit`. Defined only when `X`
271
+ has feature names that are all strings.
272
+
273
+ .. versionadded:: 1.0
274
+
275
+ See Also
276
+ --------
277
+ OPTICS : A similar clustering at multiple values of eps. Our implementation
278
+ is optimized for memory usage.
279
+
280
+ Notes
281
+ -----
282
+ For an example, see :ref:`examples/cluster/plot_dbscan.py
283
+ <sphx_glr_auto_examples_cluster_plot_dbscan.py>`.
284
+
285
+ This implementation bulk-computes all neighborhood queries, which increases
286
+ the memory complexity to O(n.d) where d is the average number of neighbors,
287
+ while original DBSCAN had memory complexity O(n). It may attract a higher
288
+ memory complexity when querying these nearest neighborhoods, depending
289
+ on the ``algorithm``.
290
+
291
+ One way to avoid the query complexity is to pre-compute sparse
292
+ neighborhoods in chunks using
293
+ :func:`NearestNeighbors.radius_neighbors_graph
294
+ <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
295
+ ``mode='distance'``, then using ``metric='precomputed'`` here.
296
+
297
+ Another way to reduce memory and computation time is to remove
298
+ (near-)duplicate points and use ``sample_weight`` instead.
299
+
300
+ :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower memory
301
+ usage.
302
+
303
+ References
304
+ ----------
305
+ Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based
306
+ Algorithm for Discovering Clusters in Large Spatial Databases with Noise"
307
+ <https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_.
308
+ In: Proceedings of the 2nd International Conference on Knowledge Discovery
309
+ and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
310
+
311
+ Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017).
312
+ :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN."
313
+ <10.1145/3068335>`
314
+ ACM Transactions on Database Systems (TODS), 42(3), 19.
315
+
316
+ Examples
317
+ --------
318
+ >>> from sklearn.cluster import DBSCAN
319
+ >>> import numpy as np
320
+ >>> X = np.array([[1, 2], [2, 2], [2, 3],
321
+ ... [8, 7], [8, 8], [25, 80]])
322
+ >>> clustering = DBSCAN(eps=3, min_samples=2).fit(X)
323
+ >>> clustering.labels_
324
+ array([ 0, 0, 0, 1, 1, -1])
325
+ >>> clustering
326
+ DBSCAN(eps=3, min_samples=2)
327
+ """
328
+
329
+ _parameter_constraints: dict = {
330
+ "eps": [Interval(Real, 0.0, None, closed="neither")],
331
+ "min_samples": [Interval(Integral, 1, None, closed="left")],
332
+ "metric": [
333
+ StrOptions(set(_VALID_METRICS) | {"precomputed"}),
334
+ callable,
335
+ ],
336
+ "metric_params": [dict, None],
337
+ "algorithm": [StrOptions({"auto", "ball_tree", "kd_tree", "brute"})],
338
+ "leaf_size": [Interval(Integral, 1, None, closed="left")],
339
+ "p": [Interval(Real, 0.0, None, closed="left"), None],
340
+ "n_jobs": [Integral, None],
341
+ }
342
+
343
+ def __init__(
344
+ self,
345
+ eps=0.5,
346
+ *,
347
+ min_samples=5,
348
+ metric="euclidean",
349
+ metric_params=None,
350
+ algorithm="auto",
351
+ leaf_size=30,
352
+ p=None,
353
+ n_jobs=None,
354
+ ):
355
+ self.eps = eps
356
+ self.min_samples = min_samples
357
+ self.metric = metric
358
+ self.metric_params = metric_params
359
+ self.algorithm = algorithm
360
+ self.leaf_size = leaf_size
361
+ self.p = p
362
+ self.n_jobs = n_jobs
363
+
364
+ @_fit_context(
365
+ # DBSCAN.metric is not validated yet
366
+ prefer_skip_nested_validation=False
367
+ )
368
+ def fit(self, X, y=None, sample_weight=None):
369
+ """Perform DBSCAN clustering from features, or distance matrix.
370
+
371
+ Parameters
372
+ ----------
373
+ X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
374
+ (n_samples, n_samples)
375
+ Training instances to cluster, or distances between instances if
376
+ ``metric='precomputed'``. If a sparse matrix is provided, it will
377
+ be converted into a sparse ``csr_matrix``.
378
+
379
+ y : Ignored
380
+ Not used, present here for API consistency by convention.
381
+
382
+ sample_weight : array-like of shape (n_samples,), default=None
383
+ Weight of each sample, such that a sample with a weight of at least
384
+ ``min_samples`` is by itself a core sample; a sample with a
385
+ negative weight may inhibit its eps-neighbor from being core.
386
+ Note that weights are absolute, and default to 1.
387
+
388
+ Returns
389
+ -------
390
+ self : object
391
+ Returns a fitted instance of self.
392
+ """
393
+ X = self._validate_data(X, accept_sparse="csr")
394
+
395
+ if sample_weight is not None:
396
+ sample_weight = _check_sample_weight(sample_weight, X)
397
+
398
+ # Calculate neighborhood for all samples. This leaves the original
399
+ # point in, which needs to be considered later (i.e. point i is in the
400
+ # neighborhood of point i. While True, its useless information)
401
+ if self.metric == "precomputed" and sparse.issparse(X):
402
+ # set the diagonal to explicit values, as a point is its own
403
+ # neighbor
404
+ X = X.copy() # copy to avoid in-place modification
405
+ with warnings.catch_warnings():
406
+ warnings.simplefilter("ignore", sparse.SparseEfficiencyWarning)
407
+ X.setdiag(X.diagonal())
408
+
409
+ neighbors_model = NearestNeighbors(
410
+ radius=self.eps,
411
+ algorithm=self.algorithm,
412
+ leaf_size=self.leaf_size,
413
+ metric=self.metric,
414
+ metric_params=self.metric_params,
415
+ p=self.p,
416
+ n_jobs=self.n_jobs,
417
+ )
418
+ neighbors_model.fit(X)
419
+ # This has worst case O(n^2) memory complexity
420
+ neighborhoods = neighbors_model.radius_neighbors(X, return_distance=False)
421
+
422
+ if sample_weight is None:
423
+ n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods])
424
+ else:
425
+ n_neighbors = np.array(
426
+ [np.sum(sample_weight[neighbors]) for neighbors in neighborhoods]
427
+ )
428
+
429
+ # Initially, all samples are noise.
430
+ labels = np.full(X.shape[0], -1, dtype=np.intp)
431
+
432
+ # A list of all core samples found.
433
+ core_samples = np.asarray(n_neighbors >= self.min_samples, dtype=np.uint8)
434
+ dbscan_inner(core_samples, neighborhoods, labels)
435
+
436
+ self.core_sample_indices_ = np.where(core_samples)[0]
437
+ self.labels_ = labels
438
+
439
+ if len(self.core_sample_indices_):
440
+ # fix for scipy sparse indexing issue
441
+ self.components_ = X[self.core_sample_indices_].copy()
442
+ else:
443
+ # no core samples
444
+ self.components_ = np.empty((0, X.shape[1]))
445
+ return self
446
+
447
+ def fit_predict(self, X, y=None, sample_weight=None):
448
+ """Compute clusters from a data or distance matrix and predict labels.
449
+
450
+ Parameters
451
+ ----------
452
+ X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
453
+ (n_samples, n_samples)
454
+ Training instances to cluster, or distances between instances if
455
+ ``metric='precomputed'``. If a sparse matrix is provided, it will
456
+ be converted into a sparse ``csr_matrix``.
457
+
458
+ y : Ignored
459
+ Not used, present here for API consistency by convention.
460
+
461
+ sample_weight : array-like of shape (n_samples,), default=None
462
+ Weight of each sample, such that a sample with a weight of at least
463
+ ``min_samples`` is by itself a core sample; a sample with a
464
+ negative weight may inhibit its eps-neighbor from being core.
465
+ Note that weights are absolute, and default to 1.
466
+
467
+ Returns
468
+ -------
469
+ labels : ndarray of shape (n_samples,)
470
+ Cluster labels. Noisy samples are given the label -1.
471
+ """
472
+ self.fit(X, sample_weight=sample_weight)
473
+ return self.labels_
474
+
475
+ def _more_tags(self):
476
+ return {"pairwise": self.metric == "precomputed"}
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_dbscan_inner.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (221 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_feature_agglomeration.py ADDED
@@ -0,0 +1,104 @@
1
+ """
2
+ Feature agglomeration. Base classes and functions for performing feature
3
+ agglomeration.
4
+ """
5
+ # Author: V. Michel, A. Gramfort
6
+ # License: BSD 3 clause
7
+
8
+ import warnings
9
+
10
+ import numpy as np
11
+ from scipy.sparse import issparse
12
+
13
+ from ..base import TransformerMixin
14
+ from ..utils import metadata_routing
15
+ from ..utils.validation import check_is_fitted
16
+
17
+ ###############################################################################
18
+ # Mixin class for feature agglomeration.
19
+
20
+
21
+ class AgglomerationTransform(TransformerMixin):
22
+ """
23
+ A class for feature agglomeration via the transform interface.
24
+ """
25
+
26
+ # This prevents ``set_split_inverse_transform`` from being generated for the
27
+ # non-standard ``Xred`` arg on ``inverse_transform``.
28
+ # TODO(1.5): remove when Xred is removed for inverse_transform.
29
+ __metadata_request__inverse_transform = {"Xred": metadata_routing.UNUSED}
30
+
31
+ def transform(self, X):
32
+ """
33
+ Transform a new matrix using the built clustering.
34
+
35
+ Parameters
36
+ ----------
37
+ X : array-like of shape (n_samples, n_features) or \
38
+ (n_samples, n_samples)
39
+ An M by N array of M observations in N dimensions, or a length
40
+ M array of M one-dimensional observations.
41
+
42
+ Returns
43
+ -------
44
+ Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
45
+ The pooled values for each feature cluster.
46
+ """
47
+ check_is_fitted(self)
48
+
49
+ X = self._validate_data(X, reset=False)
50
+ if self.pooling_func == np.mean and not issparse(X):
51
+ size = np.bincount(self.labels_)
52
+ n_samples = X.shape[0]
53
+ # a fast way to compute the mean of grouped features
54
+ nX = np.array(
55
+ [np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)]
56
+ )
57
+ else:
58
+ nX = [
59
+ self.pooling_func(X[:, self.labels_ == l], axis=1)
60
+ for l in np.unique(self.labels_)
61
+ ]
62
+ nX = np.array(nX).T
63
+ return nX
64
+
65
+ def inverse_transform(self, Xt=None, Xred=None):
66
+ """
67
+ Inverse the transformation and return a vector of size `n_features`.
68
+
69
+ Parameters
70
+ ----------
71
+ Xt : array-like of shape (n_samples, n_clusters) or (n_clusters,)
72
+ The values to be assigned to each cluster of samples.
73
+
74
+ Xred : deprecated
75
+ Use `Xt` instead.
76
+
77
+ .. deprecated:: 1.3
78
+
79
+ Returns
80
+ -------
81
+ X : ndarray of shape (n_samples, n_features) or (n_features,)
82
+ An array with the values of `Xt` assigned back to each original feature
83
+ according to the feature cluster it belongs to.
84
+ """
85
+ if Xt is None and Xred is None:
86
+ raise TypeError("Missing required positional argument: Xt")
87
+
88
+ if Xred is not None and Xt is not None:
89
+ raise ValueError("Please provide only `Xt`, and not `Xred`.")
90
+
91
+ if Xred is not None:
92
+ warnings.warn(
93
+ (
94
+ "Input argument `Xred` was renamed to `Xt` in v1.3 and will be"
95
+ " removed in v1.5."
96
+ ),
97
+ FutureWarning,
98
+ )
99
+ Xt = Xred
100
+
101
+ check_is_fitted(self)
102
+
103
+ unil, inverse = np.unique(self.labels_, return_inverse=True)
104
+ return Xt[..., inverse]
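
The ``transform``/``inverse_transform`` pair above is easiest to see through the public ``FeatureAgglomeration`` estimator, which mixes in ``AgglomerationTransform``. A small sketch, assuming a standard scikit-learn installation and using the iris data purely for illustration:

from sklearn.cluster import FeatureAgglomeration
from sklearn.datasets import load_iris

X, _ = load_iris(return_X_y=True)          # shape (150, 4)
agglo = FeatureAgglomeration(n_clusters=2).fit(X)

Xt = agglo.transform(X)                    # (150, 2): one pooled column per feature cluster
X_back = agglo.inverse_transform(Xt)       # (150, 4): cluster value copied back to each member feature

assert Xt.shape == (X.shape[0], 2)
assert X_back.shape == X.shape
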
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/hdbscan.cpython-310.pyc ADDED
Binary file (31 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_linkage.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (258 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_reachability.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (365 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (385 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.pxd ADDED
@@ -0,0 +1,49 @@
1
+ # Copyright (c) 2015, Leland McInnes
2
+ # All rights reserved.
3
+
4
+ # Redistribution and use in source and binary forms, with or without
5
+ # modification, are permitted provided that the following conditions are met:
6
+
7
+ # 1. Redistributions of source code must retain the above copyright notice,
8
+ # this list of conditions and the following disclaimer.
9
+
10
+ # 2. Redistributions in binary form must reproduce the above copyright notice,
11
+ # this list of conditions and the following disclaimer in the documentation
12
+ # and/or other materials provided with the distribution.
13
+
14
+ # 3. Neither the name of the copyright holder nor the names of its contributors
15
+ # may be used to endorse or promote products derived from this software without
16
+ # specific prior written permission.
17
+
18
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19
+ # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20
+ # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22
+ # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23
+ # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24
+ # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26
+ # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27
+ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28
+ # POSSIBILITY OF SUCH DAMAGE.
29
+
30
+ from ...utils._typedefs cimport intp_t, float64_t, uint8_t
31
+ cimport numpy as cnp
32
+
33
+ # This corresponds to the scipy.cluster.hierarchy format
34
+ ctypedef packed struct HIERARCHY_t:
35
+ intp_t left_node
36
+ intp_t right_node
37
+ float64_t value
38
+ intp_t cluster_size
39
+
40
+ # Effectively an edgelist encoding a parent/child pair, along with a value and
41
+ # the corresponding cluster_size in each row providing a tree structure.
42
+ ctypedef packed struct CONDENSED_t:
43
+ intp_t parent
44
+ intp_t child
45
+ float64_t value
46
+ intp_t cluster_size
47
+
48
+ cdef extern from "numpy/arrayobject.h":
49
+ intp_t * PyArray_SHAPE(cnp.PyArrayObject *)
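
For orientation, the two packed structs declared above can be read as record layouts. The NumPy dtypes below are an illustrative sketch only; the names ending in ``_sketch`` are invented here, and the real dtypes are defined in the compiled ``_tree`` extension.

import numpy as np

# Mirrors HIERARCHY_t: one row per merge, in scipy.cluster.hierarchy order.
HIERARCHY_dtype_sketch = np.dtype([
    ("left_node", np.intp),
    ("right_node", np.intp),
    ("value", np.float64),
    ("cluster_size", np.intp),
])

# Mirrors CONDENSED_t: one row per parent/child edge of the condensed tree.
CONDENSED_dtype_sketch = np.dtype([
    ("parent", np.intp),
    ("child", np.intp),
    ("value", np.float64),
    ("cluster_size", np.intp),
])

linkage = np.zeros(2, dtype=HIERARCHY_dtype_sketch)
linkage[0] = (0, 1, 0.5, 2)  # merge leaves 0 and 1 at distance 0.5
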
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/hdbscan.py ADDED
@@ -0,0 +1,1018 @@
1
+ """
2
+ HDBSCAN: Hierarchical Density-Based Spatial Clustering
3
+ of Applications with Noise
4
+ """
5
+ # Authors: Leland McInnes <[email protected]>
6
+ # Steve Astels <[email protected]>
7
+ # John Healy <[email protected]>
8
+ # Meekail Zain <[email protected]>
9
+ # Copyright (c) 2015, Leland McInnes
10
+ # All rights reserved.
11
+
12
+ # Redistribution and use in source and binary forms, with or without
13
+ # modification, are permitted provided that the following conditions are met:
14
+
15
+ # 1. Redistributions of source code must retain the above copyright notice,
16
+ # this list of conditions and the following disclaimer.
17
+
18
+ # 2. Redistributions in binary form must reproduce the above copyright notice,
19
+ # this list of conditions and the following disclaimer in the documentation
20
+ # and/or other materials provided with the distribution.
21
+
22
+ # 3. Neither the name of the copyright holder nor the names of its contributors
23
+ # may be used to endorse or promote products derived from this software without
24
+ # specific prior written permission.
25
+
26
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
27
+ # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28
+ # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
30
+ # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31
+ # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32
+ # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34
+ # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35
+ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36
+ # POSSIBILITY OF SUCH DAMAGE.
37
+
38
+ from numbers import Integral, Real
39
+ from warnings import warn
40
+
41
+ import numpy as np
42
+ from scipy.sparse import csgraph, issparse
43
+
44
+ from ...base import BaseEstimator, ClusterMixin, _fit_context
45
+ from ...metrics import pairwise_distances
46
+ from ...metrics._dist_metrics import DistanceMetric
47
+ from ...neighbors import BallTree, KDTree, NearestNeighbors
48
+ from ...utils._param_validation import Interval, StrOptions
49
+ from ...utils.validation import _allclose_dense_sparse, _assert_all_finite
50
+ from ._linkage import (
51
+ MST_edge_dtype,
52
+ make_single_linkage,
53
+ mst_from_data_matrix,
54
+ mst_from_mutual_reachability,
55
+ )
56
+ from ._reachability import mutual_reachability_graph
57
+ from ._tree import HIERARCHY_dtype, labelling_at_cut, tree_to_labels
58
+
59
+ FAST_METRICS = set(KDTree.valid_metrics + BallTree.valid_metrics)
60
+
61
+ # Encodings are arbitrary but must be strictly negative.
62
+ # The current encodings are chosen as extensions to the -1 noise label.
63
+ # Avoided enums so that the end user only deals with simple labels.
64
+ _OUTLIER_ENCODING: dict = {
65
+ "infinite": {
66
+ "label": -2,
67
+ # The probability could also be 1, since infinite points are certainly
68
+ # infinite outliers, however 0 is convention from the HDBSCAN library
69
+ # implementation.
70
+ "prob": 0,
71
+ },
72
+ "missing": {
73
+ "label": -3,
74
+ # A nan probability is chosen to emphasize the fact that the
75
+ # corresponding data was not considered in the clustering problem.
76
+ "prob": np.nan,
77
+ },
78
+ }
79
+
80
+
81
+ def _brute_mst(mutual_reachability, min_samples):
82
+ """
83
+ Builds a minimum spanning tree (MST) from the provided mutual-reachability
84
+ values. This function dispatches to a custom Cython implementation for
85
+ dense arrays, and `scipy.sparse.csgraph.minimum_spanning_tree` for sparse
86
+ arrays/matrices.
87
+
88
+ Parameters
89
+ ----------
90
+ mutual_reachability : {ndarray, sparse matrix} of shape \
91
+ (n_samples, n_samples)
92
+ Weighted adjacency matrix of the mutual reachability graph.
93
+
94
+ min_samples : int, default=None
95
+ The number of samples in a neighborhood for a point
96
+ to be considered as a core point. This includes the point itself.
97
+
98
+ Returns
99
+ -------
100
+ mst : ndarray of shape (n_samples - 1,), dtype=MST_edge_dtype
101
+ The MST representation of the mutual-reachability graph. The MST is
102
+ represented as a collection of edges.
103
+ """
104
+ if not issparse(mutual_reachability):
105
+ return mst_from_mutual_reachability(mutual_reachability)
106
+
107
+ # Check if the mutual reachability matrix has any rows which have
108
+ # less than `min_samples` non-zero elements.
109
+ indptr = mutual_reachability.indptr
110
+ num_points = mutual_reachability.shape[0]
111
+ if any((indptr[i + 1] - indptr[i]) < min_samples for i in range(num_points)):
112
+ raise ValueError(
113
+ f"There exists points with fewer than {min_samples} neighbors. Ensure"
114
+ " your distance matrix has non-zero values for at least"
115
+ f" `min_sample`={min_samples} neighbors for each points (i.e. K-nn"
116
+ " graph), or specify a `max_distance` in `metric_params` to use when"
117
+ " distances are missing."
118
+ )
119
+ # Check connected component on mutual reachability.
120
+ # If more than one connected component is present,
121
+ # it means that the graph is disconnected.
122
+ n_components = csgraph.connected_components(
123
+ mutual_reachability, directed=False, return_labels=False
124
+ )
125
+ if n_components > 1:
126
+ raise ValueError(
127
+ f"Sparse mutual reachability matrix has {n_components} connected"
128
+ " components. HDBSCAN cannot be perfomed on a disconnected graph. Ensure"
129
+ " that the sparse distance matrix has only one connected component."
130
+ )
131
+
132
+ # Compute the minimum spanning tree for the sparse graph
133
+ sparse_min_spanning_tree = csgraph.minimum_spanning_tree(mutual_reachability)
134
+ rows, cols = sparse_min_spanning_tree.nonzero()
135
+ mst = np.rec.fromarrays(
136
+ [rows, cols, sparse_min_spanning_tree.data],
137
+ dtype=MST_edge_dtype,
138
+ )
139
+ return mst
140
+
141
+
142
+ def _process_mst(min_spanning_tree):
143
+ """
144
+ Builds a single-linkage tree (SLT) from the provided minimum spanning tree
145
+ (MST). The MST is first sorted then processed by a custom Cython routine.
146
+
147
+ Parameters
148
+ ----------
149
+ min_spanning_tree : ndarray of shape (n_samples - 1,), dtype=MST_edge_dtype
150
+ The MST representation of the mutual-reachability graph. The MST is
151
+ represented as a collection of edges.
152
+
153
+ Returns
154
+ -------
155
+ single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
156
+ The single-linkage tree (dendrogram) built from the MST.
157
+ """
158
+ # Sort edges of the min_spanning_tree by weight
159
+ row_order = np.argsort(min_spanning_tree["distance"])
160
+ min_spanning_tree = min_spanning_tree[row_order]
161
+ # Convert edge list into standard hierarchical clustering format
162
+ return make_single_linkage(min_spanning_tree)
163
+
164
+
165
+ def _hdbscan_brute(
166
+ X,
167
+ min_samples=5,
168
+ alpha=None,
169
+ metric="euclidean",
170
+ n_jobs=None,
171
+ copy=False,
172
+ **metric_params,
173
+ ):
174
+ """
175
+ Builds a single-linkage tree (SLT) from the input data `X`. If
176
+ `metric="precomputed"` then `X` must be a symmetric array of distances.
177
+ Otherwise, the pairwise distances are calculated directly and passed to
178
+ `mutual_reachability_graph`.
179
+
180
+ Parameters
181
+ ----------
182
+ X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)
183
+ Either the raw data from which to compute the pairwise distances,
184
+ or the precomputed distances.
185
+
186
+ min_samples : int, default=None
187
+ The number of samples in a neighborhood for a point
188
+ to be considered as a core point. This includes the point itself.
189
+
190
+ alpha : float, default=1.0
191
+ A distance scaling parameter as used in robust single linkage.
192
+
193
+ metric : str or callable, default='euclidean'
194
+ The metric to use when calculating distance between instances in a
195
+ feature array.
196
+
197
+ - If metric is a string or callable, it must be one of
198
+ the options allowed by :func:`~sklearn.metrics.pairwise_distances`
199
+ for its metric parameter.
200
+
201
+ - If metric is "precomputed", X is assumed to be a distance matrix and
202
+ must be square.
203
+
204
+ n_jobs : int, default=None
205
+ The number of jobs to use for computing the pairwise distances. This
206
+ works by breaking down the pairwise matrix into n_jobs even slices and
207
+ computing them in parallel. This parameter is passed directly to
208
+ :func:`~sklearn.metrics.pairwise_distances`.
209
+
210
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
211
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
212
+ for more details.
213
+
214
+ copy : bool, default=False
215
+ If `copy=True` then any time an in-place modification would be made
216
+ that would overwrite `X`, a copy will first be made, guaranteeing that
217
+ the original data will be unchanged. Currently, it only applies when
218
+ `metric="precomputed"`, when passing a dense array or a CSR sparse
219
+ array/matrix.
220
+
221
+ metric_params : dict, default=None
222
+ Arguments passed to the distance metric.
223
+
224
+ Returns
225
+ -------
226
+ single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
227
+ The single-linkage tree (dendrogram) built from the MST.
228
+ """
229
+ if metric == "precomputed":
230
+ if X.shape[0] != X.shape[1]:
231
+ raise ValueError(
232
+ "The precomputed distance matrix is expected to be symmetric, however"
233
+ f" it has shape {X.shape}. Please verify that the"
234
+ " distance matrix was constructed correctly."
235
+ )
236
+ if not _allclose_dense_sparse(X, X.T):
237
+ raise ValueError(
238
+ "The precomputed distance matrix is expected to be symmetric, however"
239
+ " its values appear to be asymmetric. Please verify that the distance"
240
+ " matrix was constructed correctly."
241
+ )
242
+
243
+ distance_matrix = X.copy() if copy else X
244
+ else:
245
+ distance_matrix = pairwise_distances(
246
+ X, metric=metric, n_jobs=n_jobs, **metric_params
247
+ )
248
+ distance_matrix /= alpha
249
+
250
+ max_distance = metric_params.get("max_distance", 0.0)
251
+ if issparse(distance_matrix) and distance_matrix.format != "csr":
252
+ # we need CSR format to avoid a conversion in `_brute_mst` when calling
253
+ # `csgraph.connected_components`
254
+ distance_matrix = distance_matrix.tocsr()
255
+
256
+ # Note that `distance_matrix` is manipulated in-place, however we do not
257
+ # need it for anything else past this point, hence the operation is safe.
258
+ mutual_reachability_ = mutual_reachability_graph(
259
+ distance_matrix, min_samples=min_samples, max_distance=max_distance
260
+ )
261
+ min_spanning_tree = _brute_mst(mutual_reachability_, min_samples=min_samples)
262
+ # Warn if the MST couldn't be constructed around the missing distances
263
+ if np.isinf(min_spanning_tree["distance"]).any():
264
+ warn(
265
+ (
266
+ "The minimum spanning tree contains edge weights with value "
267
+ "infinity. Potentially, you are missing too many distances "
268
+ "in the initial distance matrix for the given neighborhood "
269
+ "size."
270
+ ),
271
+ UserWarning,
272
+ )
273
+ return _process_mst(min_spanning_tree)
274
+
275
+
276
+ def _hdbscan_prims(
277
+ X,
278
+ algo,
279
+ min_samples=5,
280
+ alpha=1.0,
281
+ metric="euclidean",
282
+ leaf_size=40,
283
+ n_jobs=None,
284
+ **metric_params,
285
+ ):
286
+ """
287
+ Builds a single-linkage tree (SLT) from the input data `X`. If
288
+ `metric="precomputed"` then `X` must be a symmetric array of distances.
289
+ Otherwise, the pairwise distances are calculated directly and passed to
290
+ `mutual_reachability_graph`.
291
+
292
+ Parameters
293
+ ----------
294
+ X : ndarray of shape (n_samples, n_features)
295
+ The raw data.
296
+
297
+ min_samples : int, default=None
298
+ The number of samples in a neighborhood for a point
299
+ to be considered as a core point. This includes the point itself.
300
+
301
+ alpha : float, default=1.0
302
+ A distance scaling parameter as used in robust single linkage.
303
+
304
+ metric : str or callable, default='euclidean'
305
+ The metric to use when calculating distance between instances in a
306
+ feature array. `metric` must be one of the options allowed by
307
+ :func:`~sklearn.metrics.pairwise_distances` for its metric
308
+ parameter.
309
+
310
+ n_jobs : int, default=None
311
+ The number of jobs to use for computing the pairwise distances. This
312
+ works by breaking down the pairwise matrix into n_jobs even slices and
313
+ computing them in parallel. This parameter is passed directly to
314
+ :func:`~sklearn.metrics.pairwise_distances`.
315
+
316
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
317
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
318
+ for more details.
319
+
320
+ copy : bool, default=False
321
+ If `copy=True` then any time an in-place modification would be made
322
+ that would overwrite `X`, a copy will first be made, guaranteeing that
323
+ the original data will be unchanged. Currently, it only applies when
324
+ `metric="precomputed"`, when passing a dense array or a CSR sparse
325
+ array/matrix.
326
+
327
+ metric_params : dict, default=None
328
+ Arguments passed to the distance metric.
329
+
330
+ Returns
331
+ -------
332
+ single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
333
+ The single-linkage tree (dendrogram) built from the MST.
334
+ """
335
+ # The Cython routines used require contiguous arrays
336
+ X = np.asarray(X, order="C")
337
+
338
+ # Get distance to kth nearest neighbour
339
+ nbrs = NearestNeighbors(
340
+ n_neighbors=min_samples,
341
+ algorithm=algo,
342
+ leaf_size=leaf_size,
343
+ metric=metric,
344
+ metric_params=metric_params,
345
+ n_jobs=n_jobs,
346
+ p=None,
347
+ ).fit(X)
348
+
349
+ neighbors_distances, _ = nbrs.kneighbors(X, min_samples, return_distance=True)
350
+ core_distances = np.ascontiguousarray(neighbors_distances[:, -1])
351
+ dist_metric = DistanceMetric.get_metric(metric, **metric_params)
352
+
353
+ # Mutual reachability distance is implicit in mst_from_data_matrix
354
+ min_spanning_tree = mst_from_data_matrix(X, core_distances, dist_metric, alpha)
355
+ return _process_mst(min_spanning_tree)
356
+
357
+
358
+ def remap_single_linkage_tree(tree, internal_to_raw, non_finite):
359
+ """
360
+ Takes an internal single_linkage_tree structure and adds back in a set of points
361
+ that were initially detected as non-finite and returns that new tree.
362
+ These points will all be merged into the final node at np.inf distance and
363
+ considered noise points.
364
+
365
+ Parameters
366
+ ----------
367
+ tree : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
368
+ The single-linkage tree (dendrogram) built from the MST.
369
+ internal_to_raw: dict
370
+ A mapping from internal integer index to the raw integer index
371
+ non_finite : ndarray
372
+ Indices of the entries in the raw data that are non-finite.
373
+ """
374
+ finite_count = len(internal_to_raw)
375
+
376
+ outlier_count = len(non_finite)
377
+ for i, _ in enumerate(tree):
378
+ left = tree[i]["left_node"]
379
+ right = tree[i]["right_node"]
380
+
381
+ if left < finite_count:
382
+ tree[i]["left_node"] = internal_to_raw[left]
383
+ else:
384
+ tree[i]["left_node"] = left + outlier_count
385
+ if right < finite_count:
386
+ tree[i]["right_node"] = internal_to_raw[right]
387
+ else:
388
+ tree[i]["right_node"] = right + outlier_count
389
+
390
+ outlier_tree = np.zeros(len(non_finite), dtype=HIERARCHY_dtype)
391
+ last_cluster_id = max(
392
+ tree[tree.shape[0] - 1]["left_node"], tree[tree.shape[0] - 1]["right_node"]
393
+ )
394
+ last_cluster_size = tree[tree.shape[0] - 1]["cluster_size"]
395
+ for i, outlier in enumerate(non_finite):
396
+ outlier_tree[i] = (outlier, last_cluster_id + 1, np.inf, last_cluster_size + 1)
397
+ last_cluster_id += 1
398
+ last_cluster_size += 1
399
+ tree = np.concatenate([tree, outlier_tree])
400
+ return tree
401
+
402
+
403
+ def _get_finite_row_indices(matrix):
404
+ """
405
+ Returns the indices of the purely finite rows of a
406
+ sparse matrix or dense ndarray
407
+ """
408
+ if issparse(matrix):
409
+ row_indices = np.array(
410
+ [i for i, row in enumerate(matrix.tolil().data) if np.all(np.isfinite(row))]
411
+ )
412
+ else:
413
+ (row_indices,) = np.isfinite(matrix.sum(axis=1)).nonzero()
414
+ return row_indices
415
+
416
+
417
+ class HDBSCAN(ClusterMixin, BaseEstimator):
418
+ """Cluster data using hierarchical density-based clustering.
419
+
420
+ HDBSCAN - Hierarchical Density-Based Spatial Clustering of Applications
421
+ with Noise. Performs :class:`~sklearn.cluster.DBSCAN` over varying epsilon
422
+ values and integrates the result to find a clustering that gives the best
423
+ stability over epsilon.
424
+ This allows HDBSCAN to find clusters of varying densities (unlike
425
+ :class:`~sklearn.cluster.DBSCAN`), and be more robust to parameter selection.
426
+ Read more in the :ref:`User Guide <hdbscan>`.
427
+
428
+ For an example of how to use HDBSCAN, as well as a comparison to
429
+ :class:`~sklearn.cluster.DBSCAN`, please see the :ref:`plotting demo
430
+ <sphx_glr_auto_examples_cluster_plot_hdbscan.py>`.
431
+
432
+ .. versionadded:: 1.3
433
+
434
+ Parameters
435
+ ----------
436
+ min_cluster_size : int, default=5
437
+ The minimum number of samples in a group for that group to be
438
+ considered a cluster; groupings smaller than this size will be left
439
+ as noise.
440
+
441
+ min_samples : int, default=None
442
+ The number of samples in a neighborhood for a point
443
+ to be considered as a core point. This includes the point itself.
444
+ When `None`, defaults to `min_cluster_size`.
445
+
446
+ cluster_selection_epsilon : float, default=0.0
447
+ A distance threshold. Clusters below this value will be merged.
448
+ See [5]_ for more information.
449
+
450
+ max_cluster_size : int, default=None
451
+ A limit to the size of clusters returned by the `"eom"` cluster
452
+ selection algorithm. There is no limit when `max_cluster_size=None`.
453
+ Has no effect if `cluster_selection_method="leaf"`.
454
+
455
+ metric : str or callable, default='euclidean'
456
+ The metric to use when calculating distance between instances in a
457
+ feature array.
458
+
459
+ - If metric is a string or callable, it must be one of
460
+ the options allowed by :func:`~sklearn.metrics.pairwise_distances`
461
+ for its metric parameter.
462
+
463
+ - If metric is "precomputed", X is assumed to be a distance matrix and
464
+ must be square.
465
+
466
+ metric_params : dict, default=None
467
+ Arguments passed to the distance metric.
468
+
469
+ alpha : float, default=1.0
470
+ A distance scaling parameter as used in robust single linkage.
471
+ See [3]_ for more information.
472
+
473
+ algorithm : {"auto", "brute", "kd_tree", "ball_tree"}, default="auto"
474
+ Exactly which algorithm to use for computing core distances; By default
475
+ this is set to `"auto"` which attempts to use a
476
+ :class:`~sklearn.neighbors.KDTree` tree if possible, otherwise it uses
477
+ a :class:`~sklearn.neighbors.BallTree` tree. Both `"kd_tree"` and
478
+ `"ball_tree"` algorithms use the
479
+ :class:`~sklearn.neighbors.NearestNeighbors` estimator.
480
+
481
+ If the `X` passed during `fit` is sparse or `metric` is invalid for
482
+ both :class:`~sklearn.neighbors.KDTree` and
483
+ :class:`~sklearn.neighbors.BallTree`, then it resolves to use the
484
+ `"brute"` algorithm.
485
+
486
+ .. deprecated:: 1.4
487
+ The `'kdtree'` option was deprecated in version 1.4,
488
+ and will be renamed to `'kd_tree'` in 1.6.
489
+
490
+ .. deprecated:: 1.4
491
+ The `'balltree'` option was deprecated in version 1.4,
492
+ and will be renamed to `'ball_tree'` in 1.6.
493
+
494
+ leaf_size : int, default=40
495
+ Leaf size for trees responsible for fast nearest neighbour queries when
496
+ a KDTree or a BallTree are used as core-distance algorithms. A large
497
+ dataset size and small `leaf_size` may induce excessive memory usage.
498
+ If you are running out of memory consider increasing the `leaf_size`
499
+ parameter. Ignored for `algorithm="brute"`.
500
+
501
+ n_jobs : int, default=None
502
+ Number of jobs to run in parallel to calculate distances.
503
+ `None` means 1 unless in a :obj:`joblib.parallel_backend` context.
504
+ `-1` means using all processors. See :term:`Glossary <n_jobs>`
505
+ for more details.
506
+
507
+ cluster_selection_method : {"eom", "leaf"}, default="eom"
508
+ The method used to select clusters from the condensed tree. The
509
+ standard approach for HDBSCAN* is to use an Excess of Mass (`"eom"`)
510
+ algorithm to find the most persistent clusters. Alternatively you can
511
+ instead select the clusters at the leaves of the tree -- this provides
512
+ the most fine grained and homogeneous clusters.
513
+
514
+ allow_single_cluster : bool, default=False
515
+ By default HDBSCAN* will not produce a single cluster, setting this
516
+ to True will override this and allow single cluster results in
517
+ the case that you feel this is a valid result for your dataset.
518
+
519
+ store_centers : str, default=None
520
+ Which, if any, cluster centers to compute and store. The options are:
521
+
522
+ - `None` which does not compute nor store any centers.
523
+ - `"centroid"` which calculates the center by taking the weighted
524
+ average of their positions. Note that the algorithm uses the
525
+ euclidean metric and does not guarantee that the output will be
526
+ an observed data point.
527
+ - `"medoid"` which calculates the center by taking the point in the
528
+ fitted data which minimizes the distance to all other points in
529
+ the cluster. This is slower than "centroid" since it requires
530
+ computing additional pairwise distances between points of the
531
+ same cluster but guarantees the output is an observed data point.
532
+ The medoid is also well-defined for arbitrary metrics, and does not
533
+ depend on a euclidean metric.
534
+ - `"both"` which computes and stores both forms of centers.
535
+
536
+ copy : bool, default=False
537
+ If `copy=True` then any time an in-place modifications would be made
538
+ that would overwrite data passed to :term:`fit`, a copy will first be
539
+ made, guaranteeing that the original data will be unchanged.
540
+ Currently, it only applies when `metric="precomputed"`, when passing
541
+ a dense array or a CSR sparse matrix and when `algorithm="brute"`.
542
+
543
+ Attributes
544
+ ----------
545
+ labels_ : ndarray of shape (n_samples,)
546
+ Cluster labels for each point in the dataset given to :term:`fit`.
547
+ Outliers are labeled as follows:
548
+
549
+ - Noisy samples are given the label -1.
550
+ - Samples with infinite elements (+/- np.inf) are given the label -2.
551
+ - Samples with missing data are given the label -3, even if they
552
+ also have infinite elements.
553
+
554
+ probabilities_ : ndarray of shape (n_samples,)
555
+ The strength with which each sample is a member of its assigned
556
+ cluster.
557
+
558
+ - Clustered samples have probabilities proportional to the degree that
559
+ they persist as part of the cluster.
560
+ - Noisy samples have probability zero.
561
+ - Samples with infinite elements (+/- np.inf) have probability 0.
562
+ - Samples with missing data have probability `np.nan`.
563
+
564
+ n_features_in_ : int
565
+ Number of features seen during :term:`fit`.
566
+
567
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
568
+ Names of features seen during :term:`fit`. Defined only when `X`
569
+ has feature names that are all strings.
570
+
571
+ centroids_ : ndarray of shape (n_clusters, n_features)
572
+ A collection containing the centroid of each cluster calculated under
573
+ the standard euclidean metric. The centroids may fall "outside" their
574
+ respective clusters if the clusters themselves are non-convex.
575
+
576
+ Note that `n_clusters` only counts non-outlier clusters. That is to
577
+ say, the `-1, -2, -3` labels for the outlier clusters are excluded.
578
+
579
+ medoids_ : ndarray of shape (n_clusters, n_features)
580
+ A collection containing the medoid of each cluster calculated under
581
+ whichever metric was passed to the `metric` parameter. The
582
+ medoids are points in the original cluster which minimize the average
583
+ distance to all other points in that cluster under the chosen metric.
584
+ These can be thought of as the result of projecting the `metric`-based
585
+ centroid back onto the cluster.
586
+
587
+ Note that `n_clusters` only counts non-outlier clusters. That is to
588
+ say, the `-1, -2, -3` labels for the outlier clusters are excluded.
589
+
590
+ See Also
591
+ --------
592
+ DBSCAN : Density-Based Spatial Clustering of Applications
593
+ with Noise.
594
+ OPTICS : Ordering Points To Identify the Clustering Structure.
595
+ Birch : Memory-efficient, online-learning algorithm.
596
+
597
+ References
598
+ ----------
599
+
600
+ .. [1] :doi:`Campello, R. J., Moulavi, D., & Sander, J. Density-based clustering
601
+ based on hierarchical density estimates.
602
+ <10.1007/978-3-642-37456-2_14>`
603
+ .. [2] :doi:`Campello, R. J., Moulavi, D., Zimek, A., & Sander, J.
604
+ Hierarchical density estimates for data clustering, visualization,
605
+ and outlier detection.<10.1145/2733381>`
606
+
607
+ .. [3] `Chaudhuri, K., & Dasgupta, S. Rates of convergence for the
608
+ cluster tree.
609
+ <https://papers.nips.cc/paper/2010/hash/
610
+ b534ba68236ba543ae44b22bd110a1d6-Abstract.html>`_
611
+
612
+ .. [4] `Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and
613
+ Sander, J. Density-Based Clustering Validation.
614
+ <https://www.dbs.ifi.lmu.de/~zimek/publications/SDM2014/DBCV.pdf>`_
615
+
616
+ .. [5] :arxiv:`Malzer, C., & Baum, M. "A Hybrid Approach To Hierarchical
617
+ Density-based Cluster Selection."<1911.02282>`.
618
+
619
+ Examples
620
+ --------
621
+ >>> from sklearn.cluster import HDBSCAN
622
+ >>> from sklearn.datasets import load_digits
623
+ >>> X, _ = load_digits(return_X_y=True)
624
+ >>> hdb = HDBSCAN(min_cluster_size=20)
625
+ >>> hdb.fit(X)
626
+ HDBSCAN(min_cluster_size=20)
627
+ >>> hdb.labels_
628
+ array([ 2, 6, -1, ..., -1, -1, -1])
629
+ """
630
+
631
+ _parameter_constraints = {
632
+ "min_cluster_size": [Interval(Integral, left=2, right=None, closed="left")],
633
+ "min_samples": [Interval(Integral, left=1, right=None, closed="left"), None],
634
+ "cluster_selection_epsilon": [
635
+ Interval(Real, left=0, right=None, closed="left")
636
+ ],
637
+ "max_cluster_size": [
638
+ None,
639
+ Interval(Integral, left=1, right=None, closed="left"),
640
+ ],
641
+ "metric": [StrOptions(FAST_METRICS | {"precomputed"}), callable],
642
+ "metric_params": [dict, None],
643
+ "alpha": [Interval(Real, left=0, right=None, closed="neither")],
644
+ # TODO(1.6): Remove "kdtree" and "balltree" option
645
+ "algorithm": [
646
+ StrOptions(
647
+ {"auto", "brute", "kd_tree", "ball_tree", "kdtree", "balltree"},
648
+ deprecated={"kdtree", "balltree"},
649
+ ),
650
+ ],
651
+ "leaf_size": [Interval(Integral, left=1, right=None, closed="left")],
652
+ "n_jobs": [Integral, None],
653
+ "cluster_selection_method": [StrOptions({"eom", "leaf"})],
654
+ "allow_single_cluster": ["boolean"],
655
+ "store_centers": [None, StrOptions({"centroid", "medoid", "both"})],
656
+ "copy": ["boolean"],
657
+ }
658
+
659
+ def __init__(
660
+ self,
661
+ min_cluster_size=5,
662
+ min_samples=None,
663
+ cluster_selection_epsilon=0.0,
664
+ max_cluster_size=None,
665
+ metric="euclidean",
666
+ metric_params=None,
667
+ alpha=1.0,
668
+ algorithm="auto",
669
+ leaf_size=40,
670
+ n_jobs=None,
671
+ cluster_selection_method="eom",
672
+ allow_single_cluster=False,
673
+ store_centers=None,
674
+ copy=False,
675
+ ):
676
+ self.min_cluster_size = min_cluster_size
677
+ self.min_samples = min_samples
678
+ self.alpha = alpha
679
+ self.max_cluster_size = max_cluster_size
680
+ self.cluster_selection_epsilon = cluster_selection_epsilon
681
+ self.metric = metric
682
+ self.metric_params = metric_params
683
+ self.algorithm = algorithm
684
+ self.leaf_size = leaf_size
685
+ self.n_jobs = n_jobs
686
+ self.cluster_selection_method = cluster_selection_method
687
+ self.allow_single_cluster = allow_single_cluster
688
+ self.store_centers = store_centers
689
+ self.copy = copy
690
+
691
+ @_fit_context(
692
+ # HDBSCAN.metric is not validated yet
693
+ prefer_skip_nested_validation=False
694
+ )
695
+ def fit(self, X, y=None):
696
+ """Find clusters based on hierarchical density-based clustering.
697
+
698
+ Parameters
699
+ ----------
700
+ X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
701
+ ndarray of shape (n_samples, n_samples)
702
+ A feature array, or array of distances between samples if
703
+ `metric='precomputed'`.
704
+
705
+ y : None
706
+ Ignored.
707
+
708
+ Returns
709
+ -------
710
+ self : object
711
+ Returns self.
712
+ """
713
+ if self.metric == "precomputed" and self.store_centers is not None:
714
+ raise ValueError(
715
+ "Cannot store centers when using a precomputed distance matrix."
716
+ )
717
+
718
+ self._metric_params = self.metric_params or {}
719
+ if self.metric != "precomputed":
720
+ # Non-precomputed matrices may contain non-finite values.
721
+ X = self._validate_data(
722
+ X,
723
+ accept_sparse=["csr", "lil"],
724
+ force_all_finite=False,
725
+ dtype=np.float64,
726
+ )
727
+ self._raw_data = X
728
+ all_finite = True
729
+ try:
730
+ _assert_all_finite(X.data if issparse(X) else X)
731
+ except ValueError:
732
+ all_finite = False
733
+
734
+ if not all_finite:
735
+ # Pass only the purely finite indices into hdbscan
736
+ # We will later assign all non-finite points their
737
+ # corresponding labels, as specified in `_OUTLIER_ENCODING`
738
+
739
+ # Reduce X to make the checks for missing/outlier samples more
740
+ # convenient.
741
+ reduced_X = X.sum(axis=1)
742
+
743
+ # Samples with missing data are denoted by the presence of
744
+ # `np.nan`
745
+ missing_index = np.isnan(reduced_X).nonzero()[0]
746
+
747
+ # Outlier samples are denoted by the presence of `np.inf`
748
+ infinite_index = np.isinf(reduced_X).nonzero()[0]
749
+
750
+ # Continue with only finite samples
751
+ finite_index = _get_finite_row_indices(X)
752
+ internal_to_raw = {x: y for x, y in enumerate(finite_index)}
753
+ X = X[finite_index]
754
+ elif issparse(X):
755
+ # Handle sparse precomputed distance matrices separately
756
+ X = self._validate_data(
757
+ X,
758
+ accept_sparse=["csr", "lil"],
759
+ dtype=np.float64,
760
+ )
761
+ else:
762
+ # Only non-sparse, precomputed distance matrices are handled here
763
+ # and thereby allowed to contain numpy.inf for missing distances
764
+
765
+ # Perform data validation after removing infinite values (numpy.inf)
766
+ # from the given distance matrix.
767
+ X = self._validate_data(X, force_all_finite=False, dtype=np.float64)
768
+ if np.isnan(X).any():
769
+ # TODO: Support np.nan in Cython implementation for precomputed
770
+ # dense HDBSCAN
771
+ raise ValueError("np.nan values found in precomputed-dense")
772
+ if X.shape[0] == 1:
773
+ raise ValueError("n_samples=1 while HDBSCAN requires more than one sample")
774
+ self._min_samples = (
775
+ self.min_cluster_size if self.min_samples is None else self.min_samples
776
+ )
777
+
778
+ if self._min_samples > X.shape[0]:
779
+ raise ValueError(
780
+ f"min_samples ({self._min_samples}) must be at most the number of"
781
+ f" samples in X ({X.shape[0]})"
782
+ )
783
+
784
+ # TODO(1.6): Remove
785
+ if self.algorithm == "kdtree":
786
+ warn(
787
+ (
788
+ "`algorithm='kdtree'`has been deprecated in 1.4 and will be renamed"
789
+ " to'kd_tree'`in 1.6. To keep the past behaviour, set"
790
+ " `algorithm='kd_tree'`."
791
+ ),
792
+ FutureWarning,
793
+ )
794
+ self.algorithm = "kd_tree"
795
+
796
+ # TODO(1.6): Remove
797
+ if self.algorithm == "balltree":
798
+ warn(
799
+ (
800
+ "`algorithm='balltree'`has been deprecated in 1.4 and will be"
801
+ " renamed to'ball_tree'`in 1.6. To keep the past behaviour, set"
802
+ " `algorithm='ball_tree'`."
803
+ ),
804
+ FutureWarning,
805
+ )
806
+ self.algorithm = "ball_tree"
807
+
808
+ mst_func = None
809
+ kwargs = dict(
810
+ X=X,
811
+ min_samples=self._min_samples,
812
+ alpha=self.alpha,
813
+ metric=self.metric,
814
+ n_jobs=self.n_jobs,
815
+ **self._metric_params,
816
+ )
817
+ if self.algorithm == "kd_tree" and self.metric not in KDTree.valid_metrics:
818
+ raise ValueError(
819
+ f"{self.metric} is not a valid metric for a KDTree-based algorithm."
820
+ " Please select a different metric."
821
+ )
822
+ elif (
823
+ self.algorithm == "ball_tree" and self.metric not in BallTree.valid_metrics
824
+ ):
825
+ raise ValueError(
826
+ f"{self.metric} is not a valid metric for a BallTree-based algorithm."
827
+ " Please select a different metric."
828
+ )
829
+
830
+ if self.algorithm != "auto":
831
+ if (
832
+ self.metric != "precomputed"
833
+ and issparse(X)
834
+ and self.algorithm != "brute"
835
+ ):
836
+ raise ValueError("Sparse data matrices only support algorithm `brute`.")
837
+
838
+ if self.algorithm == "brute":
839
+ mst_func = _hdbscan_brute
840
+ kwargs["copy"] = self.copy
841
+ elif self.algorithm == "kd_tree":
842
+ mst_func = _hdbscan_prims
843
+ kwargs["algo"] = "kd_tree"
844
+ kwargs["leaf_size"] = self.leaf_size
845
+ else:
846
+ mst_func = _hdbscan_prims
847
+ kwargs["algo"] = "ball_tree"
848
+ kwargs["leaf_size"] = self.leaf_size
849
+ else:
850
+ if issparse(X) or self.metric not in FAST_METRICS:
851
+ # We can't do much with sparse matrices ...
852
+ mst_func = _hdbscan_brute
853
+ kwargs["copy"] = self.copy
854
+ elif self.metric in KDTree.valid_metrics:
855
+ # TODO: Benchmark KD vs Ball Tree efficiency
856
+ mst_func = _hdbscan_prims
857
+ kwargs["algo"] = "kd_tree"
858
+ kwargs["leaf_size"] = self.leaf_size
859
+ else:
860
+ # Metric is a valid BallTree metric
861
+ mst_func = _hdbscan_prims
862
+ kwargs["algo"] = "ball_tree"
863
+ kwargs["leaf_size"] = self.leaf_size
864
+
865
+ self._single_linkage_tree_ = mst_func(**kwargs)
866
+
867
+ self.labels_, self.probabilities_ = tree_to_labels(
868
+ self._single_linkage_tree_,
869
+ self.min_cluster_size,
870
+ self.cluster_selection_method,
871
+ self.allow_single_cluster,
872
+ self.cluster_selection_epsilon,
873
+ self.max_cluster_size,
874
+ )
875
+ if self.metric != "precomputed" and not all_finite:
876
+ # Remap indices to align with original data in the case of
877
+ # non-finite entries. Samples with np.inf are given the label -2 and
878
+ # those with np.nan the label -3, per _OUTLIER_ENCODING.
879
+ self._single_linkage_tree_ = remap_single_linkage_tree(
880
+ self._single_linkage_tree_,
881
+ internal_to_raw,
882
+ # There may be overlap for points w/ both `np.inf` and `np.nan`
883
+ non_finite=set(np.hstack([infinite_index, missing_index])),
884
+ )
885
+ new_labels = np.empty(self._raw_data.shape[0], dtype=np.int32)
886
+ new_labels[finite_index] = self.labels_
887
+ new_labels[infinite_index] = _OUTLIER_ENCODING["infinite"]["label"]
888
+ new_labels[missing_index] = _OUTLIER_ENCODING["missing"]["label"]
889
+ self.labels_ = new_labels
890
+
891
+ new_probabilities = np.zeros(self._raw_data.shape[0], dtype=np.float64)
892
+ new_probabilities[finite_index] = self.probabilities_
893
+ # Infinite outliers have probability 0 by convention, though this
894
+ # is arbitrary.
895
+ new_probabilities[infinite_index] = _OUTLIER_ENCODING["infinite"]["prob"]
896
+ new_probabilities[missing_index] = _OUTLIER_ENCODING["missing"]["prob"]
897
+ self.probabilities_ = new_probabilities
898
+
899
+ if self.store_centers:
900
+ self._weighted_cluster_center(X)
901
+ return self
902
+
903
+ def fit_predict(self, X, y=None):
904
+ """Cluster X and return the associated cluster labels.
905
+
906
+ Parameters
907
+ ----------
908
+ X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
909
+ ndarray of shape (n_samples, n_samples)
910
+ A feature array, or array of distances between samples if
911
+ `metric='precomputed'`.
912
+
913
+ y : None
914
+ Ignored.
915
+
916
+ Returns
917
+ -------
918
+ y : ndarray of shape (n_samples,)
919
+ Cluster labels.
920
+ """
921
+ self.fit(X)
922
+ return self.labels_
923
+
924
+ def _weighted_cluster_center(self, X):
925
+ """Calculate and store the centroids/medoids of each cluster.
926
+
927
+ This requires `X` to be a raw feature array, not precomputed
928
+ distances. Rather than return outputs directly, this helper method
929
+ instead stores them in the `self.{centroids, medoids}_` attributes.
930
+ The choice for which attributes are calculated and stored is mediated
931
+ by the value of `self.store_centers`.
932
+
933
+ Parameters
934
+ ----------
935
+ X : ndarray of shape (n_samples, n_features)
936
+ The feature array that the estimator was fit with.
937
+
938
+ """
939
+ # Number of non-noise clusters
940
+ n_clusters = len(set(self.labels_) - {-1, -2})
941
+ mask = np.empty((X.shape[0],), dtype=np.bool_)
942
+ make_centroids = self.store_centers in ("centroid", "both")
943
+ make_medoids = self.store_centers in ("medoid", "both")
944
+
945
+ if make_centroids:
946
+ self.centroids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64)
947
+ if make_medoids:
948
+ self.medoids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64)
949
+
950
+ # Need to handle iteratively since each cluster may have a different
951
+ # number of samples, hence we can't create a homogeneous 3D array.
952
+ for idx in range(n_clusters):
953
+ mask = self.labels_ == idx
954
+ data = X[mask]
955
+ strength = self.probabilities_[mask]
956
+ if make_centroids:
957
+ self.centroids_[idx] = np.average(data, weights=strength, axis=0)
958
+ if make_medoids:
959
+ # TODO: Implement weighted argmin PWD backend
960
+ dist_mat = pairwise_distances(
961
+ data, metric=self.metric, **self._metric_params
962
+ )
963
+ dist_mat = dist_mat * strength
964
+ medoid_index = np.argmin(dist_mat.sum(axis=1))
965
+ self.medoids_[idx] = data[medoid_index]
966
+ return
967
+
968
+ def dbscan_clustering(self, cut_distance, min_cluster_size=5):
969
+ """Return clustering given by DBSCAN without border points.
970
+
971
+ Return clustering that would be equivalent to running DBSCAN* for a
972
+ particular cut_distance (or epsilon) DBSCAN* can be thought of as
973
+ DBSCAN without the border points. As such these results may differ
974
+ slightly from `cluster.DBSCAN` due to the difference in implementation
975
+ over the non-core points.
976
+
977
+ This can also be thought of as a flat clustering derived from a constant
978
+ height cut through the single linkage tree.
979
+
980
+ This represents the result of selecting a cut value for robust single linkage
981
+ clustering. The `min_cluster_size` allows the flat clustering to declare noise
982
+ points (and clusters smaller than `min_cluster_size`).
983
+
984
+ Parameters
985
+ ----------
986
+ cut_distance : float
987
+ The mutual reachability distance cut value to use to generate a
988
+ flat clustering.
989
+
990
+ min_cluster_size : int, default=5
991
+ Clusters smaller than this value will be called 'noise' and remain
992
+ unclustered in the resulting flat clustering.
993
+
994
+ Returns
995
+ -------
996
+ labels : ndarray of shape (n_samples,)
997
+ An array of cluster labels, one per datapoint.
998
+ Outliers are labeled as follows:
999
+
1000
+ - Noisy samples are given the label -1.
1001
+ - Samples with infinite elements (+/- np.inf) are given the label -2.
1002
+ - Samples with missing data are given the label -3, even if they
1003
+ also have infinite elements.
1004
+ """
1005
+ labels = labelling_at_cut(
1006
+ self._single_linkage_tree_, cut_distance, min_cluster_size
1007
+ )
1008
+ # Infer indices from labels generated during `fit`
1009
+ infinite_index = self.labels_ == _OUTLIER_ENCODING["infinite"]["label"]
1010
+ missing_index = self.labels_ == _OUTLIER_ENCODING["missing"]["label"]
1011
+
1012
+ # Overwrite infinite/missing outlier samples (otherwise simple noise)
1013
+ labels[infinite_index] = _OUTLIER_ENCODING["infinite"]["label"]
1014
+ labels[missing_index] = _OUTLIER_ENCODING["missing"]["label"]
1015
+ return labels
1016
+
1017
+ def _more_tags(self):
1018
+ return {"allow_nan": self.metric != "precomputed"}
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (195 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/test_reachibility.cpython-310.pyc ADDED
Binary file (2.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/test_reachibility.py ADDED
@@ -0,0 +1,63 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from sklearn.cluster._hdbscan._reachability import mutual_reachability_graph
5
+ from sklearn.utils._testing import (
6
+ _convert_container,
7
+ assert_allclose,
8
+ )
9
+
10
+
11
+ def test_mutual_reachability_graph_error_sparse_format():
12
+ """Check that we raise an error if the sparse format is not CSR."""
13
+ rng = np.random.RandomState(0)
14
+ X = rng.randn(10, 10)
15
+ X = X.T @ X
16
+ np.fill_diagonal(X, 0.0)
17
+ X = _convert_container(X, "sparse_csc")
18
+
19
+ err_msg = "Only sparse CSR matrices are supported"
20
+ with pytest.raises(ValueError, match=err_msg):
21
+ mutual_reachability_graph(X)
22
+
23
+
24
+ @pytest.mark.parametrize("array_type", ["array", "sparse_csr"])
25
+ def test_mutual_reachability_graph_inplace(array_type):
26
+ """Check that the operation is happening inplace."""
27
+ rng = np.random.RandomState(0)
28
+ X = rng.randn(10, 10)
29
+ X = X.T @ X
30
+ np.fill_diagonal(X, 0.0)
31
+ X = _convert_container(X, array_type)
32
+
33
+ mr_graph = mutual_reachability_graph(X)
34
+
35
+ assert id(mr_graph) == id(X)
36
+
37
+
38
+ def test_mutual_reachability_graph_equivalence_dense_sparse():
39
+ """Check that we get the same results for dense and sparse implementation."""
40
+ rng = np.random.RandomState(0)
41
+ X = rng.randn(5, 5)
42
+ X_dense = X.T @ X
43
+ X_sparse = _convert_container(X_dense, "sparse_csr")
44
+
45
+ mr_graph_dense = mutual_reachability_graph(X_dense, min_samples=3)
46
+ mr_graph_sparse = mutual_reachability_graph(X_sparse, min_samples=3)
47
+
48
+ assert_allclose(mr_graph_dense, mr_graph_sparse.toarray())
49
+
50
+
51
+ @pytest.mark.parametrize("array_type", ["array", "sparse_csr"])
52
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
53
+ def test_mutual_reachability_graph_preserve_dtype(array_type, dtype):
54
+ """Check that the computation preserve dtype thanks to fused types."""
55
+ rng = np.random.RandomState(0)
56
+ X = rng.randn(10, 10)
57
+ X = (X.T @ X).astype(dtype)
58
+ np.fill_diagonal(X, 0.0)
59
+ X = _convert_container(X, array_type)
60
+
61
+ assert X.dtype == dtype
62
+ mr_graph = mutual_reachability_graph(X)
63
+ assert mr_graph.dtype == dtype
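
The quantity exercised by these tests is the mutual reachability distance, mr(a, b) = max(core_k(a), core_k(b), d(a, b)), where core_k(x) is the distance from x to its ``min_samples``-th nearest neighbour (counting x itself). The function below is a pure-NumPy sketch of that definition for a dense, symmetric distance matrix, not the library's compiled implementation.

import numpy as np

def mutual_reachability_dense_sketch(distance_matrix, min_samples=5):
    # Core distance: the min_samples-th smallest entry of each row,
    # counting the zero self-distance on the diagonal.
    core = np.partition(distance_matrix, min_samples - 1, axis=1)[:, min_samples - 1]
    # mr(a, b) = max(core(a), core(b), d(a, b)), computed with broadcasting.
    return np.maximum(np.maximum(core[:, None], core[None, :]), distance_matrix)
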
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (332 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.pxd ADDED
@@ -0,0 +1,9 @@
1
+ from ..utils._typedefs cimport intp_t
2
+
3
+ cdef class UnionFind:
4
+ cdef intp_t next_label
5
+ cdef intp_t[:] parent
6
+ cdef intp_t[:] size
7
+
8
+ cdef void union(self, intp_t m, intp_t n) noexcept
9
+ cdef intp_t fast_find(self, intp_t n) noexcept
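
A pure-Python sketch of the ``UnionFind`` interface declared above (``union``, ``fast_find``). It assumes the label-generating variant used when building dendrograms, where each merge receives a fresh label starting at ``n_samples``; the real implementation is the compiled Cython class, so treat this only as an illustration.

import numpy as np

class UnionFindSketch:
    def __init__(self, n):
        self.next_label = n
        # 2 * n - 1 slots: n leaves plus n - 1 internal merge nodes.
        self.parent = np.full(2 * n - 1, -1, dtype=np.intp)
        self.size = np.hstack([np.ones(n, dtype=np.intp),
                               np.zeros(n - 1, dtype=np.intp)])

    def union(self, m, n):
        # Attach both roots to a brand-new label and accumulate sizes.
        self.parent[m] = self.next_label
        self.parent[n] = self.next_label
        self.size[self.next_label] = self.size[m] + self.size[n]
        self.next_label += 1

    def fast_find(self, n):
        root = n
        while self.parent[root] != -1:
            root = self.parent[root]
        # Path compression: point every node on the path directly at the root.
        while self.parent[n] != -1:
            nxt = self.parent[n]
            self.parent[n] = root
            n = nxt
        return root
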
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (529 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.pxd ADDED
@@ -0,0 +1,48 @@
1
+ from cython cimport floating
2
+
3
+
4
+ cdef floating _euclidean_dense_dense(
5
+ const floating*,
6
+ const floating*,
7
+ int,
8
+ bint
9
+ ) noexcept nogil
10
+
11
+ cdef floating _euclidean_sparse_dense(
12
+ const floating[::1],
13
+ const int[::1],
14
+ const floating[::1],
15
+ floating,
16
+ bint
17
+ ) noexcept nogil
18
+
19
+ cpdef void _relocate_empty_clusters_dense(
20
+ const floating[:, ::1],
21
+ const floating[::1],
22
+ const floating[:, ::1],
23
+ floating[:, ::1],
24
+ floating[::1],
25
+ const int[::1]
26
+ )
27
+
28
+ cpdef void _relocate_empty_clusters_sparse(
29
+ const floating[::1],
30
+ const int[::1],
31
+ const int[::1],
32
+ const floating[::1],
33
+ const floating[:, ::1],
34
+ floating[:, ::1],
35
+ floating[::1],
36
+ const int[::1]
37
+ )
38
+
39
+ cdef void _average_centers(
40
+ floating[:, ::1],
41
+ const floating[::1]
42
+ )
43
+
44
+ cdef void _center_shift(
45
+ const floating[:, ::1],
46
+ const floating[:, ::1],
47
+ floating[::1]
48
+ )
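
Illustrative NumPy equivalents of two of the helpers declared above, ``_center_shift`` and ``_average_centers``. These are sketches of the intended semantics under stated assumptions, not the compiled Cython implementations.

import numpy as np

def center_shift_sketch(centers_old, centers_new):
    # Per-center Euclidean distance between two consecutive iterations,
    # used for convergence checks in the k-means loops.
    return np.sqrt(((centers_new - centers_old) ** 2).sum(axis=1))

def average_centers_sketch(centers_sum, weight_in_clusters):
    # Turn per-cluster weighted sums into means; clusters with zero weight
    # are left untouched (they are handled by the relocation helpers).
    centers = centers_sum.copy()
    nonempty = weight_in_clusters > 0
    centers[nonempty] /= weight_in_clusters[nonempty, None]
    return centers
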
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_elkan.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (526 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_lloyd.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (381 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_minibatch.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (324 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_kmeans.py ADDED
@@ -0,0 +1,2318 @@
1
+ """K-means clustering."""
2
+
3
+ # Authors: Gael Varoquaux <[email protected]>
4
+ # Thomas Rueckstiess <[email protected]>
5
+ # James Bergstra <[email protected]>
6
+ # Jan Schlueter <[email protected]>
7
+ # Nelle Varoquaux
8
+ # Peter Prettenhofer <[email protected]>
9
+ # Olivier Grisel <[email protected]>
10
+ # Mathieu Blondel <[email protected]>
11
+ # Robert Layton <[email protected]>
12
+ # License: BSD 3 clause
13
+
14
+ import warnings
15
+ from abc import ABC, abstractmethod
16
+ from numbers import Integral, Real
17
+
18
+ import numpy as np
19
+ import scipy.sparse as sp
20
+
21
+ from ..base import (
22
+ BaseEstimator,
23
+ ClassNamePrefixFeaturesOutMixin,
24
+ ClusterMixin,
25
+ TransformerMixin,
26
+ _fit_context,
27
+ )
28
+ from ..exceptions import ConvergenceWarning
29
+ from ..metrics.pairwise import _euclidean_distances, euclidean_distances
30
+ from ..utils import check_array, check_random_state
31
+ from ..utils._openmp_helpers import _openmp_effective_n_threads
32
+ from ..utils._param_validation import Interval, StrOptions, validate_params
33
+ from ..utils.extmath import row_norms, stable_cumsum
34
+ from ..utils.fixes import threadpool_info, threadpool_limits
35
+ from ..utils.sparsefuncs import mean_variance_axis
36
+ from ..utils.sparsefuncs_fast import assign_rows_csr
37
+ from ..utils.validation import (
38
+ _check_sample_weight,
39
+ _is_arraylike_not_scalar,
40
+ check_is_fitted,
41
+ )
42
+ from ._k_means_common import (
43
+ CHUNK_SIZE,
44
+ _inertia_dense,
45
+ _inertia_sparse,
46
+ _is_same_clustering,
47
+ )
48
+ from ._k_means_elkan import (
49
+ elkan_iter_chunked_dense,
50
+ elkan_iter_chunked_sparse,
51
+ init_bounds_dense,
52
+ init_bounds_sparse,
53
+ )
54
+ from ._k_means_lloyd import lloyd_iter_chunked_dense, lloyd_iter_chunked_sparse
55
+ from ._k_means_minibatch import _minibatch_update_dense, _minibatch_update_sparse
56
+
57
+ ###############################################################################
58
+ # Initialization heuristic
59
+
60
+
61
+ @validate_params(
62
+ {
63
+ "X": ["array-like", "sparse matrix"],
64
+ "n_clusters": [Interval(Integral, 1, None, closed="left")],
65
+ "sample_weight": ["array-like", None],
66
+ "x_squared_norms": ["array-like", None],
67
+ "random_state": ["random_state"],
68
+ "n_local_trials": [Interval(Integral, 1, None, closed="left"), None],
69
+ },
70
+ prefer_skip_nested_validation=True,
71
+ )
72
+ def kmeans_plusplus(
73
+ X,
74
+ n_clusters,
75
+ *,
76
+ sample_weight=None,
77
+ x_squared_norms=None,
78
+ random_state=None,
79
+ n_local_trials=None,
80
+ ):
81
+ """Init n_clusters seeds according to k-means++.
82
+
83
+ .. versionadded:: 0.24
84
+
85
+ Parameters
86
+ ----------
87
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
88
+ The data to pick seeds from.
89
+
90
+ n_clusters : int
91
+ The number of centroids to initialize.
92
+
93
+ sample_weight : array-like of shape (n_samples,), default=None
94
+ The weights for each observation in `X`. If `None`, all observations
95
+ are assigned equal weight. `sample_weight` is ignored if `init`
96
+ is a callable or a user provided array.
97
+
98
+ .. versionadded:: 1.3
99
+
100
+ x_squared_norms : array-like of shape (n_samples,), default=None
101
+ Squared Euclidean norm of each data point.
102
+
103
+ random_state : int or RandomState instance, default=None
104
+ Determines random number generation for centroid initialization. Pass
105
+ an int for reproducible output across multiple function calls.
106
+ See :term:`Glossary <random_state>`.
107
+
108
+ n_local_trials : int, default=None
109
+ The number of seeding trials for each center (except the first),
110
+ of which the one reducing inertia the most is greedily chosen.
111
+ Set to None to make the number of trials depend logarithmically
112
+ on the number of seeds (2+log(k)) which is the recommended setting.
113
+ Setting to 1 disables the greedy cluster selection and recovers the
114
+ vanilla k-means++ algorithm which was empirically shown to work less
115
+ well than its greedy variant.
116
+
117
+ Returns
118
+ -------
119
+ centers : ndarray of shape (n_clusters, n_features)
120
+ The initial centers for k-means.
121
+
122
+ indices : ndarray of shape (n_clusters,)
123
+ The index location of the chosen centers in the data array X. For a
124
+ given index and center, X[index] = center.
125
+
126
+ Notes
127
+ -----
128
+ Selects initial cluster centers for k-means clustering in a smart way
129
+ to speed up convergence. See: Arthur, D. and Vassilvitskii, S.
130
+ "k-means++: the advantages of careful seeding". ACM-SIAM symposium
131
+ on Discrete algorithms. 2007
132
+
133
+ Examples
134
+ --------
135
+
136
+ >>> from sklearn.cluster import kmeans_plusplus
137
+ >>> import numpy as np
138
+ >>> X = np.array([[1, 2], [1, 4], [1, 0],
139
+ ... [10, 2], [10, 4], [10, 0]])
140
+ >>> centers, indices = kmeans_plusplus(X, n_clusters=2, random_state=0)
141
+ >>> centers
142
+ array([[10, 2],
143
+ [ 1, 0]])
144
+ >>> indices
145
+ array([3, 2])
146
+ """
147
+ # Check data
148
+ check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
149
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
150
+
151
+ if X.shape[0] < n_clusters:
152
+ raise ValueError(
153
+ f"n_samples={X.shape[0]} should be >= n_clusters={n_clusters}."
154
+ )
155
+
156
+ # Check parameters
157
+ if x_squared_norms is None:
158
+ x_squared_norms = row_norms(X, squared=True)
159
+ else:
160
+ x_squared_norms = check_array(x_squared_norms, dtype=X.dtype, ensure_2d=False)
161
+
162
+ if x_squared_norms.shape[0] != X.shape[0]:
163
+ raise ValueError(
164
+ f"The length of x_squared_norms {x_squared_norms.shape[0]} should "
165
+ f"be equal to the length of n_samples {X.shape[0]}."
166
+ )
167
+
168
+ random_state = check_random_state(random_state)
169
+
170
+ # Call private k-means++
171
+ centers, indices = _kmeans_plusplus(
172
+ X, n_clusters, x_squared_norms, sample_weight, random_state, n_local_trials
173
+ )
174
+
175
+ return centers, indices
176
+
177
+
178
+ def _kmeans_plusplus(
179
+ X, n_clusters, x_squared_norms, sample_weight, random_state, n_local_trials=None
180
+ ):
181
+ """Computational component for initialization of n_clusters by
182
+ k-means++. Prior validation of data is assumed.
183
+
184
+ Parameters
185
+ ----------
186
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
187
+ The data to pick seeds for.
188
+
189
+ n_clusters : int
190
+ The number of seeds to choose.
191
+
192
+ sample_weight : ndarray of shape (n_samples,)
193
+ The weights for each observation in `X`.
194
+
195
+ x_squared_norms : ndarray of shape (n_samples,)
196
+ Squared Euclidean norm of each data point.
197
+
198
+ random_state : RandomState instance
199
+ The generator used to initialize the centers.
200
+ See :term:`Glossary <random_state>`.
201
+
202
+ n_local_trials : int, default=None
203
+ The number of seeding trials for each center (except the first),
204
+ of which the one reducing inertia the most is greedily chosen.
205
+ Set to None to make the number of trials depend logarithmically
206
+ on the number of seeds (2+log(k)); this is the default.
207
+
208
+ Returns
209
+ -------
210
+ centers : ndarray of shape (n_clusters, n_features)
211
+ The initial centers for k-means.
212
+
213
+ indices : ndarray of shape (n_clusters,)
214
+ The index location of the chosen centers in the data array X. For a
215
+ given index and center, X[index] = center.
216
+ """
217
+ n_samples, n_features = X.shape
218
+
219
+ centers = np.empty((n_clusters, n_features), dtype=X.dtype)
220
+
221
+ # Set the number of local seeding trials if none is given
222
+ if n_local_trials is None:
223
+ # This is what Arthur/Vassilvitskii tried, but did not report
224
+ # specific results for other than mentioning in the conclusion
225
+ # that it helped.
226
+ n_local_trials = 2 + int(np.log(n_clusters))
227
+
228
+ # Pick first center randomly and track index of point
229
+ center_id = random_state.choice(n_samples, p=sample_weight / sample_weight.sum())
230
+ indices = np.full(n_clusters, -1, dtype=int)
231
+ if sp.issparse(X):
232
+ centers[0] = X[[center_id]].toarray()
233
+ else:
234
+ centers[0] = X[center_id]
235
+ indices[0] = center_id
236
+
237
+ # Initialize list of closest distances and calculate current potential
238
+ closest_dist_sq = _euclidean_distances(
239
+ centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms, squared=True
240
+ )
241
+ current_pot = closest_dist_sq @ sample_weight
242
+
243
+ # Pick the remaining n_clusters-1 points
244
+ for c in range(1, n_clusters):
245
+ # Choose center candidates by sampling with probability proportional
246
+ # to the squared distance to the closest existing center
247
+ rand_vals = random_state.uniform(size=n_local_trials) * current_pot
248
+ candidate_ids = np.searchsorted(
249
+ stable_cumsum(sample_weight * closest_dist_sq), rand_vals
250
+ )
251
+ # XXX: numerical imprecision can result in a candidate_id out of range
252
+ np.clip(candidate_ids, None, closest_dist_sq.size - 1, out=candidate_ids)
253
+
254
+ # Compute distances to center candidates
255
+ distance_to_candidates = _euclidean_distances(
256
+ X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True
257
+ )
258
+
259
+ # update closest distances squared and potential for each candidate
260
+ np.minimum(closest_dist_sq, distance_to_candidates, out=distance_to_candidates)
261
+ candidates_pot = distance_to_candidates @ sample_weight.reshape(-1, 1)
262
+
263
+ # Decide which candidate is the best
264
+ best_candidate = np.argmin(candidates_pot)
265
+ current_pot = candidates_pot[best_candidate]
266
+ closest_dist_sq = distance_to_candidates[best_candidate]
267
+ best_candidate = candidate_ids[best_candidate]
268
+
269
+ # Permanently add best center candidate found in local tries
270
+ if sp.issparse(X):
271
+ centers[c] = X[[best_candidate]].toarray()
272
+ else:
273
+ centers[c] = X[best_candidate]
274
+ indices[c] = best_candidate
275
+
276
+ return centers, indices
277
+
278
+
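The seeding loop above samples each new center with probability proportional to the weighted squared distance to the closest center chosen so far, then keeps the greedy best of `n_local_trials` candidates. A minimal NumPy-only sketch of the basic (non-greedy) sampling rule, not part of the committed file and assuming dense float data:

import numpy as np

def kmeans_plusplus_sketch(X, n_clusters, seed=0):
    rng = np.random.default_rng(seed)
    # First center: uniform random pick.
    centers = [X[rng.integers(X.shape[0])]]
    for _ in range(1, n_clusters):
        C = np.asarray(centers)
        # Squared distance from every point to its closest chosen center.
        d2 = ((X[:, None, :] - C[None, :, :]) ** 2).sum(-1).min(axis=1)
        # Sample the next center proportionally to that squared distance.
        centers.append(X[rng.choice(X.shape[0], p=d2 / d2.sum())])
    return np.asarray(centers)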
279
+ ###############################################################################
280
+ # K-means batch estimation by EM (expectation maximization)
281
+
282
+
283
+ def _tolerance(X, tol):
284
+ """Return a tolerance which is dependent on the dataset."""
285
+ if tol == 0:
286
+ return 0
287
+ if sp.issparse(X):
288
+ variances = mean_variance_axis(X, axis=0)[1]
289
+ else:
290
+ variances = np.var(X, axis=0)
291
+ return np.mean(variances) * tol
292
+
293
+
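In other words, the convergence threshold actually used is the user-facing `tol` scaled by the mean per-feature variance of the data; a dense-data sketch of the same computation:

import numpy as np

X = np.array([[1.0, 2.0], [1.0, 4.0], [10.0, 0.0]])
tol = 1e-4
# Matches _tolerance(X, tol) for dense X: relative tol -> absolute threshold.
effective_tol = np.var(X, axis=0).mean() * tol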
294
+ @validate_params(
295
+ {
296
+ "X": ["array-like", "sparse matrix"],
297
+ "sample_weight": ["array-like", None],
298
+ "return_n_iter": [bool],
299
+ },
300
+ prefer_skip_nested_validation=False,
301
+ )
302
+ def k_means(
303
+ X,
304
+ n_clusters,
305
+ *,
306
+ sample_weight=None,
307
+ init="k-means++",
308
+ n_init="auto",
309
+ max_iter=300,
310
+ verbose=False,
311
+ tol=1e-4,
312
+ random_state=None,
313
+ copy_x=True,
314
+ algorithm="lloyd",
315
+ return_n_iter=False,
316
+ ):
317
+ """Perform K-means clustering algorithm.
318
+
319
+ Read more in the :ref:`User Guide <k_means>`.
320
+
321
+ Parameters
322
+ ----------
323
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
324
+ The observations to cluster. It must be noted that the data
325
+ will be converted to C ordering, which will cause a memory copy
326
+ if the given data is not C-contiguous.
327
+
328
+ n_clusters : int
329
+ The number of clusters to form as well as the number of
330
+ centroids to generate.
331
+
332
+ sample_weight : array-like of shape (n_samples,), default=None
333
+ The weights for each observation in `X`. If `None`, all observations
334
+ are assigned equal weight. `sample_weight` is not used during
335
+ initialization if `init` is a callable or a user provided array.
336
+
337
+ init : {'k-means++', 'random'}, callable or array-like of shape \
338
+ (n_clusters, n_features), default='k-means++'
339
+ Method for initialization:
340
+
341
+ - `'k-means++'` : selects initial cluster centers for k-means
342
+ clustering in a smart way to speed up convergence. See the Notes
343
+ section of `kmeans_plusplus` for more details.
344
+ - `'random'`: choose `n_clusters` observations (rows) at random from data
345
+ for the initial centroids.
346
+ - If an array is passed, it should be of shape `(n_clusters, n_features)`
347
+ and gives the initial centers.
348
+ - If a callable is passed, it should take arguments `X`, `n_clusters` and a
349
+ random state and return an initialization.
350
+
351
+ n_init : 'auto' or int, default="auto"
352
+ Number of times the k-means algorithm will be run with different
353
+ centroid seeds. The final result will be the best output of
354
+ n_init consecutive runs in terms of inertia.
355
+
356
+ When `n_init='auto'`, the number of runs depends on the value of init:
357
+ 10 if using `init='random'` or `init` is a callable;
358
+ 1 if using `init='k-means++'` or `init` is an array-like.
359
+
360
+ .. versionadded:: 1.2
361
+ Added 'auto' option for `n_init`.
362
+
363
+ .. versionchanged:: 1.4
364
+ Default value for `n_init` changed to `'auto'`.
365
+
366
+ max_iter : int, default=300
367
+ Maximum number of iterations of the k-means algorithm to run.
368
+
369
+ verbose : bool, default=False
370
+ Verbosity mode.
371
+
372
+ tol : float, default=1e-4
373
+ Relative tolerance with regards to Frobenius norm of the difference
374
+ in the cluster centers of two consecutive iterations to declare
375
+ convergence.
376
+
377
+ random_state : int, RandomState instance or None, default=None
378
+ Determines random number generation for centroid initialization. Use
379
+ an int to make the randomness deterministic.
380
+ See :term:`Glossary <random_state>`.
381
+
382
+ copy_x : bool, default=True
383
+ When pre-computing distances it is more numerically accurate to center
384
+ the data first. If `copy_x` is True (default), then the original data is
385
+ not modified. If False, the original data is modified, and put back
386
+ before the function returns, but small numerical differences may be
387
+ introduced by subtracting and then adding the data mean. Note that if
388
+ the original data is not C-contiguous, a copy will be made even if
389
+ `copy_x` is False. If the original data is sparse, but not in CSR format,
390
+ a copy will be made even if `copy_x` is False.
391
+
392
+ algorithm : {"lloyd", "elkan"}, default="lloyd"
393
+ K-means algorithm to use. The classical EM-style algorithm is `"lloyd"`.
394
+ The `"elkan"` variation can be more efficient on some datasets with
395
+ well-defined clusters, by using the triangle inequality. However it's
396
+ more memory intensive due to the allocation of an extra array of shape
397
+ `(n_samples, n_clusters)`.
398
+
399
+ .. versionchanged:: 0.18
400
+ Added Elkan algorithm
401
+
402
+ .. versionchanged:: 1.1
403
+ Renamed "full" to "lloyd", and deprecated "auto" and "full".
404
+ Changed "auto" to use "lloyd" instead of "elkan".
405
+
406
+ return_n_iter : bool, default=False
407
+ Whether or not to return the number of iterations.
408
+
409
+ Returns
410
+ -------
411
+ centroid : ndarray of shape (n_clusters, n_features)
412
+ Centroids found at the last iteration of k-means.
413
+
414
+ label : ndarray of shape (n_samples,)
415
+ The `label[i]` is the code or index of the centroid the
416
+ i'th observation is closest to.
417
+
418
+ inertia : float
419
+ The final value of the inertia criterion (sum of squared distances to
420
+ the closest centroid for all observations in the training set).
421
+
422
+ best_n_iter : int
423
+ Number of iterations corresponding to the best results.
424
+ Returned only if `return_n_iter` is set to True.
425
+
426
+ Examples
427
+ --------
428
+ >>> import numpy as np
429
+ >>> from sklearn.cluster import k_means
430
+ >>> X = np.array([[1, 2], [1, 4], [1, 0],
431
+ ... [10, 2], [10, 4], [10, 0]])
432
+ >>> centroid, label, inertia = k_means(
433
+ ... X, n_clusters=2, n_init="auto", random_state=0
434
+ ... )
435
+ >>> centroid
436
+ array([[10., 2.],
437
+ [ 1., 2.]])
438
+ >>> label
439
+ array([1, 1, 1, 0, 0, 0], dtype=int32)
440
+ >>> inertia
441
+ 16.0
442
+ """
443
+ est = KMeans(
444
+ n_clusters=n_clusters,
445
+ init=init,
446
+ n_init=n_init,
447
+ max_iter=max_iter,
448
+ verbose=verbose,
449
+ tol=tol,
450
+ random_state=random_state,
451
+ copy_x=copy_x,
452
+ algorithm=algorithm,
453
+ ).fit(X, sample_weight=sample_weight)
454
+ if return_n_iter:
455
+ return est.cluster_centers_, est.labels_, est.inertia_, est.n_iter_
456
+ else:
457
+ return est.cluster_centers_, est.labels_, est.inertia_
458
+
459
+
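As the body above makes explicit, `k_means` is a thin functional wrapper around the `KMeans` estimator, so the two entry points are expected to agree for the same parameters and `random_state` (sketch):

import numpy as np
from sklearn.cluster import KMeans, k_means

X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]], dtype=float)

centers, labels, inertia = k_means(X, n_clusters=2, random_state=0)
est = KMeans(n_clusters=2, random_state=0).fit(X)

assert np.allclose(centers, est.cluster_centers_)
assert np.array_equal(labels, est.labels_)
assert np.isclose(inertia, est.inertia_)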
460
+ def _kmeans_single_elkan(
461
+ X,
462
+ sample_weight,
463
+ centers_init,
464
+ max_iter=300,
465
+ verbose=False,
466
+ tol=1e-4,
467
+ n_threads=1,
468
+ ):
469
+ """A single run of k-means elkan, assumes preparation completed prior.
470
+
471
+ Parameters
472
+ ----------
473
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
474
+ The observations to cluster. If sparse matrix, must be in CSR format.
475
+
476
+ sample_weight : array-like of shape (n_samples,)
477
+ The weights for each observation in X.
478
+
479
+ centers_init : ndarray of shape (n_clusters, n_features)
480
+ The initial centers.
481
+
482
+ max_iter : int, default=300
483
+ Maximum number of iterations of the k-means algorithm to run.
484
+
485
+ verbose : bool, default=False
486
+ Verbosity mode.
487
+
488
+ tol : float, default=1e-4
489
+ Relative tolerance with regards to Frobenius norm of the difference
490
+ in the cluster centers of two consecutive iterations to declare
491
+ convergence.
492
+ It's not advised to set `tol=0` since convergence might never be
493
+ declared due to rounding errors. Use a very small number instead.
494
+
495
+ n_threads : int, default=1
496
+ The number of OpenMP threads to use for the computation. Parallelism is
497
+ sample-wise on the main cython loop which assigns each sample to its
498
+ closest center.
499
+
500
+ Returns
501
+ -------
502
+ centroid : ndarray of shape (n_clusters, n_features)
503
+ Centroids found at the last iteration of k-means.
504
+
505
+ label : ndarray of shape (n_samples,)
506
+ label[i] is the code or index of the centroid the
507
+ i'th observation is closest to.
508
+
509
+ inertia : float
510
+ The final value of the inertia criterion (sum of squared distances to
511
+ the closest centroid for all observations in the training set).
512
+
513
+ n_iter : int
514
+ Number of iterations run.
515
+ """
516
+ n_samples = X.shape[0]
517
+ n_clusters = centers_init.shape[0]
518
+
519
+ # Buffers to avoid new allocations at each iteration.
520
+ centers = centers_init
521
+ centers_new = np.zeros_like(centers)
522
+ weight_in_clusters = np.zeros(n_clusters, dtype=X.dtype)
523
+ labels = np.full(n_samples, -1, dtype=np.int32)
524
+ labels_old = labels.copy()
525
+ center_half_distances = euclidean_distances(centers) / 2
526
+ distance_next_center = np.partition(
527
+ np.asarray(center_half_distances), kth=1, axis=0
528
+ )[1]
529
+ upper_bounds = np.zeros(n_samples, dtype=X.dtype)
530
+ lower_bounds = np.zeros((n_samples, n_clusters), dtype=X.dtype)
531
+ center_shift = np.zeros(n_clusters, dtype=X.dtype)
532
+
533
+ if sp.issparse(X):
534
+ init_bounds = init_bounds_sparse
535
+ elkan_iter = elkan_iter_chunked_sparse
536
+ _inertia = _inertia_sparse
537
+ else:
538
+ init_bounds = init_bounds_dense
539
+ elkan_iter = elkan_iter_chunked_dense
540
+ _inertia = _inertia_dense
541
+
542
+ init_bounds(
543
+ X,
544
+ centers,
545
+ center_half_distances,
546
+ labels,
547
+ upper_bounds,
548
+ lower_bounds,
549
+ n_threads=n_threads,
550
+ )
551
+
552
+ strict_convergence = False
553
+
554
+ for i in range(max_iter):
555
+ elkan_iter(
556
+ X,
557
+ sample_weight,
558
+ centers,
559
+ centers_new,
560
+ weight_in_clusters,
561
+ center_half_distances,
562
+ distance_next_center,
563
+ upper_bounds,
564
+ lower_bounds,
565
+ labels,
566
+ center_shift,
567
+ n_threads,
568
+ )
569
+
570
+ # compute new pairwise distances between centers and closest other
571
+ # center of each center for next iterations
572
+ center_half_distances = euclidean_distances(centers_new) / 2
573
+ distance_next_center = np.partition(
574
+ np.asarray(center_half_distances), kth=1, axis=0
575
+ )[1]
576
+
577
+ if verbose:
578
+ inertia = _inertia(X, sample_weight, centers, labels, n_threads)
579
+ print(f"Iteration {i}, inertia {inertia}")
580
+
581
+ centers, centers_new = centers_new, centers
582
+
583
+ if np.array_equal(labels, labels_old):
584
+ # First check the labels for strict convergence.
585
+ if verbose:
586
+ print(f"Converged at iteration {i}: strict convergence.")
587
+ strict_convergence = True
588
+ break
589
+ else:
590
+ # No strict convergence, check for tol based convergence.
591
+ center_shift_tot = (center_shift**2).sum()
592
+ if center_shift_tot <= tol:
593
+ if verbose:
594
+ print(
595
+ f"Converged at iteration {i}: center shift "
596
+ f"{center_shift_tot} within tolerance {tol}."
597
+ )
598
+ break
599
+
600
+ labels_old[:] = labels
601
+
602
+ if not strict_convergence:
603
+ # rerun E-step so that predicted labels match cluster centers
604
+ elkan_iter(
605
+ X,
606
+ sample_weight,
607
+ centers,
608
+ centers,
609
+ weight_in_clusters,
610
+ center_half_distances,
611
+ distance_next_center,
612
+ upper_bounds,
613
+ lower_bounds,
614
+ labels,
615
+ center_shift,
616
+ n_threads,
617
+ update_centers=False,
618
+ )
619
+
620
+ inertia = _inertia(X, sample_weight, centers, labels, n_threads)
621
+
622
+ return labels, inertia, centers, i + 1
623
+
624
+
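The Elkan iterations above avoid many distance computations via the triangle inequality: when a sample's distance to its current center is at most half the distance between that center and another center, the other center cannot be closer. A small numeric illustration of that pruning rule (sketch only; the compiled kernels track per-sample upper and lower bounds to apply it incrementally):

import numpy as np

x = np.array([0.5, 0.0])
c_assigned = np.array([1.0, 0.0])
c_other = np.array([5.0, 0.0])

d_x_assigned = np.linalg.norm(x - c_assigned)                 # 0.5
half_center_gap = np.linalg.norm(c_assigned - c_other) / 2.0  # 2.0

# Triangle inequality: d(x, c_other) >= d(c_assigned, c_other) - d(x, c_assigned),
# so if d(x, c_assigned) <= half_center_gap, c_other cannot be the closest center.
if d_x_assigned <= half_center_gap:
    print("c_other pruned without computing d(x, c_other)")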
625
+ def _kmeans_single_lloyd(
626
+ X,
627
+ sample_weight,
628
+ centers_init,
629
+ max_iter=300,
630
+ verbose=False,
631
+ tol=1e-4,
632
+ n_threads=1,
633
+ ):
634
+ """A single run of k-means lloyd, assumes preparation completed prior.
635
+
636
+ Parameters
637
+ ----------
638
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
639
+ The observations to cluster. If sparse matrix, must be in CSR format.
640
+
641
+ sample_weight : ndarray of shape (n_samples,)
642
+ The weights for each observation in X.
643
+
644
+ centers_init : ndarray of shape (n_clusters, n_features)
645
+ The initial centers.
646
+
647
+ max_iter : int, default=300
648
+ Maximum number of iterations of the k-means algorithm to run.
649
+
650
+ verbose : bool, default=False
651
+ Verbosity mode.
652
+
653
+ tol : float, default=1e-4
654
+ Relative tolerance with regards to Frobenius norm of the difference
655
+ in the cluster centers of two consecutive iterations to declare
656
+ convergence.
657
+ It's not advised to set `tol=0` since convergence might never be
658
+ declared due to rounding errors. Use a very small number instead.
659
+
660
+ n_threads : int, default=1
661
+ The number of OpenMP threads to use for the computation. Parallelism is
662
+ sample-wise on the main cython loop which assigns each sample to its
663
+ closest center.
664
+
665
+ Returns
666
+ -------
667
+ centroid : ndarray of shape (n_clusters, n_features)
668
+ Centroids found at the last iteration of k-means.
669
+
670
+ label : ndarray of shape (n_samples,)
671
+ label[i] is the code or index of the centroid the
672
+ i'th observation is closest to.
673
+
674
+ inertia : float
675
+ The final value of the inertia criterion (sum of squared distances to
676
+ the closest centroid for all observations in the training set).
677
+
678
+ n_iter : int
679
+ Number of iterations run.
680
+ """
681
+ n_clusters = centers_init.shape[0]
682
+
683
+ # Buffers to avoid new allocations at each iteration.
684
+ centers = centers_init
685
+ centers_new = np.zeros_like(centers)
686
+ labels = np.full(X.shape[0], -1, dtype=np.int32)
687
+ labels_old = labels.copy()
688
+ weight_in_clusters = np.zeros(n_clusters, dtype=X.dtype)
689
+ center_shift = np.zeros(n_clusters, dtype=X.dtype)
690
+
691
+ if sp.issparse(X):
692
+ lloyd_iter = lloyd_iter_chunked_sparse
693
+ _inertia = _inertia_sparse
694
+ else:
695
+ lloyd_iter = lloyd_iter_chunked_dense
696
+ _inertia = _inertia_dense
697
+
698
+ strict_convergence = False
699
+
700
+ # Threadpoolctl context to limit the number of threads in second level of
701
+ # nested parallelism (i.e. BLAS) to avoid oversubscription.
702
+ with threadpool_limits(limits=1, user_api="blas"):
703
+ for i in range(max_iter):
704
+ lloyd_iter(
705
+ X,
706
+ sample_weight,
707
+ centers,
708
+ centers_new,
709
+ weight_in_clusters,
710
+ labels,
711
+ center_shift,
712
+ n_threads,
713
+ )
714
+
715
+ if verbose:
716
+ inertia = _inertia(X, sample_weight, centers, labels, n_threads)
717
+ print(f"Iteration {i}, inertia {inertia}.")
718
+
719
+ centers, centers_new = centers_new, centers
720
+
721
+ if np.array_equal(labels, labels_old):
722
+ # First check the labels for strict convergence.
723
+ if verbose:
724
+ print(f"Converged at iteration {i}: strict convergence.")
725
+ strict_convergence = True
726
+ break
727
+ else:
728
+ # No strict convergence, check for tol based convergence.
729
+ center_shift_tot = (center_shift**2).sum()
730
+ if center_shift_tot <= tol:
731
+ if verbose:
732
+ print(
733
+ f"Converged at iteration {i}: center shift "
734
+ f"{center_shift_tot} within tolerance {tol}."
735
+ )
736
+ break
737
+
738
+ labels_old[:] = labels
739
+
740
+ if not strict_convergence:
741
+ # rerun E-step so that predicted labels match cluster centers
742
+ lloyd_iter(
743
+ X,
744
+ sample_weight,
745
+ centers,
746
+ centers,
747
+ weight_in_clusters,
748
+ labels,
749
+ center_shift,
750
+ n_threads,
751
+ update_centers=False,
752
+ )
753
+
754
+ inertia = _inertia(X, sample_weight, centers, labels, n_threads)
755
+
756
+ return labels, inertia, centers, i + 1
757
+
758
+
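Each pass of the loop above is one Lloyd (EM-style) iteration, delegated to the chunked Cython kernels: assign every sample to its nearest center, then move each center to the weighted mean of its samples. A dense, unweighted NumPy sketch of a single iteration:

import numpy as np

def lloyd_step(X, centers):
    # E-step: nearest center for every sample.
    d2 = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(-1)
    labels = d2.argmin(axis=1)
    # M-step: each center becomes the mean of its assigned samples
    # (empty clusters are simply kept in place in this sketch).
    new_centers = centers.copy()
    for k in range(centers.shape[0]):
        mask = labels == k
        if mask.any():
            new_centers[k] = X[mask].mean(axis=0)
    return labels, new_centers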
759
+ def _labels_inertia(X, sample_weight, centers, n_threads=1, return_inertia=True):
760
+ """E step of the K-means EM algorithm.
761
+
762
+ Compute the labels and the inertia of the given samples and centers.
763
+
764
+ Parameters
765
+ ----------
766
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
767
+ The input samples to assign to the labels. If sparse matrix, must
768
+ be in CSR format.
769
+
770
+ sample_weight : ndarray of shape (n_samples,)
771
+ The weights for each observation in X.
772
+
777
+ centers : ndarray of shape (n_clusters, n_features)
778
+ The cluster centers.
779
+
780
+ n_threads : int, default=1
781
+ The number of OpenMP threads to use for the computation. Parallelism is
782
+ sample-wise on the main cython loop which assigns each sample to its
783
+ closest center.
784
+
785
+ return_inertia : bool, default=True
786
+ Whether to compute and return the inertia.
787
+
788
+ Returns
789
+ -------
790
+ labels : ndarray of shape (n_samples,)
791
+ The resulting assignment.
792
+
793
+ inertia : float
794
+ Sum of squared distances of samples to their closest cluster center.
795
+ Inertia is only returned if return_inertia is True.
796
+ """
797
+ n_samples = X.shape[0]
798
+ n_clusters = centers.shape[0]
799
+
800
+ labels = np.full(n_samples, -1, dtype=np.int32)
801
+ center_shift = np.zeros(n_clusters, dtype=centers.dtype)
802
+
803
+ if sp.issparse(X):
804
+ _labels = lloyd_iter_chunked_sparse
805
+ _inertia = _inertia_sparse
806
+ else:
807
+ _labels = lloyd_iter_chunked_dense
808
+ _inertia = _inertia_dense
809
+
810
+ _labels(
811
+ X,
812
+ sample_weight,
813
+ centers,
814
+ centers_new=None,
815
+ weight_in_clusters=None,
816
+ labels=labels,
817
+ center_shift=center_shift,
818
+ n_threads=n_threads,
819
+ update_centers=False,
820
+ )
821
+
822
+ if return_inertia:
823
+ inertia = _inertia(X, sample_weight, centers, labels, n_threads)
824
+ return labels, inertia
825
+
826
+ return labels
827
+
828
+
829
+ def _labels_inertia_threadpool_limit(
830
+ X, sample_weight, centers, n_threads=1, return_inertia=True
831
+ ):
832
+ """Same as _labels_inertia but in a threadpool_limits context."""
833
+ with threadpool_limits(limits=1, user_api="blas"):
834
+ result = _labels_inertia(X, sample_weight, centers, n_threads, return_inertia)
835
+
836
+ return result
837
+
838
+
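For dense inputs, the E-step performed by these helpers boils down to an argmin over sample-to-center squared distances, with inertia the weighted sum of the minimum squared distances (sketch, ignoring the chunking and thread limits handled above):

import numpy as np

def labels_inertia_sketch(X, sample_weight, centers):
    d2 = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(-1)
    labels = d2.argmin(axis=1)
    inertia = float(sample_weight @ d2[np.arange(X.shape[0]), labels])
    return labels, inertia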
839
+ class _BaseKMeans(
840
+ ClassNamePrefixFeaturesOutMixin, TransformerMixin, ClusterMixin, BaseEstimator, ABC
841
+ ):
842
+ """Base class for KMeans and MiniBatchKMeans"""
843
+
844
+ _parameter_constraints: dict = {
845
+ "n_clusters": [Interval(Integral, 1, None, closed="left")],
846
+ "init": [StrOptions({"k-means++", "random"}), callable, "array-like"],
847
+ "n_init": [
848
+ StrOptions({"auto"}),
849
+ Interval(Integral, 1, None, closed="left"),
850
+ ],
851
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
852
+ "tol": [Interval(Real, 0, None, closed="left")],
853
+ "verbose": ["verbose"],
854
+ "random_state": ["random_state"],
855
+ }
856
+
857
+ def __init__(
858
+ self,
859
+ n_clusters,
860
+ *,
861
+ init,
862
+ n_init,
863
+ max_iter,
864
+ tol,
865
+ verbose,
866
+ random_state,
867
+ ):
868
+ self.n_clusters = n_clusters
869
+ self.init = init
870
+ self.max_iter = max_iter
871
+ self.tol = tol
872
+ self.n_init = n_init
873
+ self.verbose = verbose
874
+ self.random_state = random_state
875
+
876
+ def _check_params_vs_input(self, X, default_n_init=None):
877
+ # n_clusters
878
+ if X.shape[0] < self.n_clusters:
879
+ raise ValueError(
880
+ f"n_samples={X.shape[0]} should be >= n_clusters={self.n_clusters}."
881
+ )
882
+
883
+ # tol
884
+ self._tol = _tolerance(X, self.tol)
885
+
886
+ # n-init
887
+ if self.n_init == "auto":
888
+ if isinstance(self.init, str) and self.init == "k-means++":
889
+ self._n_init = 1
890
+ elif isinstance(self.init, str) and self.init == "random":
891
+ self._n_init = default_n_init
892
+ elif callable(self.init):
893
+ self._n_init = default_n_init
894
+ else: # array-like
895
+ self._n_init = 1
896
+ else:
897
+ self._n_init = self.n_init
898
+
899
+ if _is_arraylike_not_scalar(self.init) and self._n_init != 1:
900
+ warnings.warn(
901
+ (
902
+ "Explicit initial center position passed: performing only"
903
+ f" one init in {self.__class__.__name__} instead of "
904
+ f"n_init={self._n_init}."
905
+ ),
906
+ RuntimeWarning,
907
+ stacklevel=2,
908
+ )
909
+ self._n_init = 1
910
+
911
+ @abstractmethod
912
+ def _warn_mkl_vcomp(self, n_active_threads):
913
+ """Issue an estimator specific warning when vcomp and mkl are both present
914
+
915
+ This method is called by `_check_mkl_vcomp`.
916
+ """
917
+
918
+ def _check_mkl_vcomp(self, X, n_samples):
919
+ """Check when vcomp and mkl are both present"""
920
+ # The BLAS call inside a prange in lloyd_iter_chunked_dense is known to
921
+ # cause a small memory leak when there are fewer chunks than the number
922
+ # of available threads. It only happens when the OpenMP library is
923
+ # vcomp (microsoft OpenMP) and the BLAS library is MKL. see #18653
924
+ if sp.issparse(X):
925
+ return
926
+
927
+ n_active_threads = int(np.ceil(n_samples / CHUNK_SIZE))
928
+ if n_active_threads < self._n_threads:
929
+ modules = threadpool_info()
930
+ has_vcomp = "vcomp" in [module["prefix"] for module in modules]
931
+ has_mkl = ("mkl", "intel") in [
932
+ (module["internal_api"], module.get("threading_layer", None))
933
+ for module in modules
934
+ ]
935
+ if has_vcomp and has_mkl:
936
+ self._warn_mkl_vcomp(n_active_threads)
937
+
938
+ def _validate_center_shape(self, X, centers):
939
+ """Check if centers is compatible with X and n_clusters."""
940
+ if centers.shape[0] != self.n_clusters:
941
+ raise ValueError(
942
+ f"The shape of the initial centers {centers.shape} does not "
943
+ f"match the number of clusters {self.n_clusters}."
944
+ )
945
+ if centers.shape[1] != X.shape[1]:
946
+ raise ValueError(
947
+ f"The shape of the initial centers {centers.shape} does not "
948
+ f"match the number of features of the data {X.shape[1]}."
949
+ )
950
+
951
+ def _check_test_data(self, X):
952
+ X = self._validate_data(
953
+ X,
954
+ accept_sparse="csr",
955
+ reset=False,
956
+ dtype=[np.float64, np.float32],
957
+ order="C",
958
+ accept_large_sparse=False,
959
+ )
960
+ return X
961
+
962
+ def _init_centroids(
963
+ self,
964
+ X,
965
+ x_squared_norms,
966
+ init,
967
+ random_state,
968
+ sample_weight,
969
+ init_size=None,
970
+ n_centroids=None,
971
+ ):
972
+ """Compute the initial centroids.
973
+
974
+ Parameters
975
+ ----------
976
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
977
+ The input samples.
978
+
979
+ x_squared_norms : ndarray of shape (n_samples,)
980
+ Squared euclidean norm of each data point. Pass it if you have it
981
+ at hand already to avoid it being recomputed here.
982
+
983
+ init : {'k-means++', 'random'}, callable or ndarray of shape \
984
+ (n_clusters, n_features)
985
+ Method for initialization.
986
+
987
+ random_state : RandomState instance
988
+ Determines random number generation for centroid initialization.
989
+ See :term:`Glossary <random_state>`.
990
+
991
+ sample_weight : ndarray of shape (n_samples,)
992
+ The weights for each observation in X. `sample_weight` is not used
993
+ during initialization if `init` is a callable or a user provided
994
+ array.
995
+
996
+ init_size : int, default=None
997
+ Number of samples to randomly sample for speeding up the
998
+ initialization (sometimes at the expense of accuracy).
999
+
1000
+ n_centroids : int, default=None
1001
+ Number of centroids to initialize.
1002
+ If left to `None`, the number of centroids will be equal to the
1003
+ number of clusters to form (self.n_clusters).
1004
+
1005
+ Returns
1006
+ -------
1007
+ centers : ndarray of shape (n_clusters, n_features)
1008
+ Initial centroids of clusters.
1009
+ """
1010
+ n_samples = X.shape[0]
1011
+ n_clusters = self.n_clusters if n_centroids is None else n_centroids
1012
+
1013
+ if init_size is not None and init_size < n_samples:
1014
+ init_indices = random_state.randint(0, n_samples, init_size)
1015
+ X = X[init_indices]
1016
+ x_squared_norms = x_squared_norms[init_indices]
1017
+ n_samples = X.shape[0]
1018
+ sample_weight = sample_weight[init_indices]
1019
+
1020
+ if isinstance(init, str) and init == "k-means++":
1021
+ centers, _ = _kmeans_plusplus(
1022
+ X,
1023
+ n_clusters,
1024
+ random_state=random_state,
1025
+ x_squared_norms=x_squared_norms,
1026
+ sample_weight=sample_weight,
1027
+ )
1028
+ elif isinstance(init, str) and init == "random":
1029
+ seeds = random_state.choice(
1030
+ n_samples,
1031
+ size=n_clusters,
1032
+ replace=False,
1033
+ p=sample_weight / sample_weight.sum(),
1034
+ )
1035
+ centers = X[seeds]
1036
+ elif _is_arraylike_not_scalar(self.init):
1037
+ centers = init
1038
+ elif callable(init):
1039
+ centers = init(X, n_clusters, random_state=random_state)
1040
+ centers = check_array(centers, dtype=X.dtype, copy=False, order="C")
1041
+ self._validate_center_shape(X, centers)
1042
+
1043
+ if sp.issparse(centers):
1044
+ centers = centers.toarray()
1045
+
1046
+ return centers
1047
+
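The `init` argument documented above can also be a callable with signature `(X, n_clusters, random_state)` returning an `(n_clusters, n_features)` array. A deliberately naive, hypothetical example of such a callable, shown only to illustrate the expected interface:

import numpy as np
from sklearn.cluster import KMeans

def first_rows_init(X, n_clusters, random_state=None):
    # Hypothetical helper: seed the centers with the first n_clusters rows.
    return np.asarray(X)[:n_clusters]

X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]], dtype=float)
km = KMeans(n_clusters=2, init=first_rows_init, n_init=1).fit(X)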
1048
+ def fit_predict(self, X, y=None, sample_weight=None):
1049
+ """Compute cluster centers and predict cluster index for each sample.
1050
+
1051
+ Convenience method; equivalent to calling fit(X) followed by
1052
+ predict(X).
1053
+
1054
+ Parameters
1055
+ ----------
1056
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1057
+ New data to transform.
1058
+
1059
+ y : Ignored
1060
+ Not used, present here for API consistency by convention.
1061
+
1062
+ sample_weight : array-like of shape (n_samples,), default=None
1063
+ The weights for each observation in X. If None, all observations
1064
+ are assigned equal weight.
1065
+
1066
+ Returns
1067
+ -------
1068
+ labels : ndarray of shape (n_samples,)
1069
+ Index of the cluster each sample belongs to.
1070
+ """
1071
+ return self.fit(X, sample_weight=sample_weight).labels_
1072
+
1073
+ def predict(self, X, sample_weight="deprecated"):
1074
+ """Predict the closest cluster each sample in X belongs to.
1075
+
1076
+ In the vector quantization literature, `cluster_centers_` is called
1077
+ the code book and each value returned by `predict` is the index of
1078
+ the closest code in the code book.
1079
+
1080
+ Parameters
1081
+ ----------
1082
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1083
+ New data to predict.
1084
+
1085
+ sample_weight : array-like of shape (n_samples,), default=None
1086
+ The weights for each observation in X. If None, all observations
1087
+ are assigned equal weight.
1088
+
1089
+ .. deprecated:: 1.3
1090
+ The parameter `sample_weight` is deprecated in version 1.3
1091
+ and will be removed in 1.5.
1092
+
1093
+ Returns
1094
+ -------
1095
+ labels : ndarray of shape (n_samples,)
1096
+ Index of the cluster each sample belongs to.
1097
+ """
1098
+ check_is_fitted(self)
1099
+
1100
+ X = self._check_test_data(X)
1101
+ if not (isinstance(sample_weight, str) and sample_weight == "deprecated"):
1102
+ warnings.warn(
1103
+ (
1104
+ "'sample_weight' was deprecated in version 1.3 and "
1105
+ "will be removed in 1.5."
1106
+ ),
1107
+ FutureWarning,
1108
+ )
1109
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
1110
+ else:
1111
+ sample_weight = _check_sample_weight(None, X, dtype=X.dtype)
1112
+
1113
+ labels = _labels_inertia_threadpool_limit(
1114
+ X,
1115
+ sample_weight,
1116
+ self.cluster_centers_,
1117
+ n_threads=self._n_threads,
1118
+ return_inertia=False,
1119
+ )
1120
+
1121
+ return labels
1122
+
1123
+ def fit_transform(self, X, y=None, sample_weight=None):
1124
+ """Compute clustering and transform X to cluster-distance space.
1125
+
1126
+ Equivalent to fit(X).transform(X), but more efficiently implemented.
1127
+
1128
+ Parameters
1129
+ ----------
1130
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1131
+ New data to transform.
1132
+
1133
+ y : Ignored
1134
+ Not used, present here for API consistency by convention.
1135
+
1136
+ sample_weight : array-like of shape (n_samples,), default=None
1137
+ The weights for each observation in X. If None, all observations
1138
+ are assigned equal weight.
1139
+
1140
+ Returns
1141
+ -------
1142
+ X_new : ndarray of shape (n_samples, n_clusters)
1143
+ X transformed in the new space.
1144
+ """
1145
+ return self.fit(X, sample_weight=sample_weight)._transform(X)
1146
+
1147
+ def transform(self, X):
1148
+ """Transform X to a cluster-distance space.
1149
+
1150
+ In the new space, each dimension is the distance to the cluster
1151
+ centers. Note that even if X is sparse, the array returned by
1152
+ `transform` will typically be dense.
1153
+
1154
+ Parameters
1155
+ ----------
1156
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1157
+ New data to transform.
1158
+
1159
+ Returns
1160
+ -------
1161
+ X_new : ndarray of shape (n_samples, n_clusters)
1162
+ X transformed in the new space.
1163
+ """
1164
+ check_is_fitted(self)
1165
+
1166
+ X = self._check_test_data(X)
1167
+ return self._transform(X)
1168
+
1169
+ def _transform(self, X):
1170
+ """Guts of transform method; no input validation."""
1171
+ return euclidean_distances(X, self.cluster_centers_)
1172
+
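Because `transform` returns the distance of each sample to every center, `predict` should agree with the row-wise argmin of `transform` (up to exact ties):

import numpy as np
from sklearn.cluster import KMeans

X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]], dtype=float)
km = KMeans(n_clusters=2, random_state=0, n_init="auto").fit(X)
assert np.array_equal(km.predict(X), km.transform(X).argmin(axis=1))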
1173
+ def score(self, X, y=None, sample_weight=None):
1174
+ """Opposite of the value of X on the K-means objective.
1175
+
1176
+ Parameters
1177
+ ----------
1178
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1179
+ New data.
1180
+
1181
+ y : Ignored
1182
+ Not used, present here for API consistency by convention.
1183
+
1184
+ sample_weight : array-like of shape (n_samples,), default=None
1185
+ The weights for each observation in X. If None, all observations
1186
+ are assigned equal weight.
1187
+
1188
+ Returns
1189
+ -------
1190
+ score : float
1191
+ Opposite of the value of X on the K-means objective.
1192
+ """
1193
+ check_is_fitted(self)
1194
+
1195
+ X = self._check_test_data(X)
1196
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
1197
+
1198
+ _, scores = _labels_inertia_threadpool_limit(
1199
+ X, sample_weight, self.cluster_centers_, self._n_threads
1200
+ )
1201
+ return -scores
1202
+
1203
+ def _more_tags(self):
1204
+ return {
1205
+ "_xfail_checks": {
1206
+ "check_sample_weights_invariance": (
1207
+ "zero sample_weight is not equivalent to removing samples"
1208
+ ),
1209
+ },
1210
+ }
1211
+
1212
+
1213
+ class KMeans(_BaseKMeans):
1214
+ """K-Means clustering.
1215
+
1216
+ Read more in the :ref:`User Guide <k_means>`.
1217
+
1218
+ Parameters
1219
+ ----------
1220
+
1221
+ n_clusters : int, default=8
1222
+ The number of clusters to form as well as the number of
1223
+ centroids to generate.
1224
+
1225
+ For an example of how to choose an optimal value for `n_clusters` refer to
1226
+ :ref:`sphx_glr_auto_examples_cluster_plot_kmeans_silhouette_analysis.py`.
1227
+
1228
+ init : {'k-means++', 'random'}, callable or array-like of shape \
1229
+ (n_clusters, n_features), default='k-means++'
1230
+ Method for initialization:
1231
+
1232
+ * 'k-means++' : selects initial cluster centroids using sampling \
1233
+ based on an empirical probability distribution of the points' \
1234
+ contribution to the overall inertia. This technique speeds up \
1235
+ convergence. The algorithm implemented is "greedy k-means++". It \
1236
+ differs from the vanilla k-means++ by making several trials at \
1237
+ each sampling step and choosing the best centroid among them.
1238
+
1239
+ * 'random': choose `n_clusters` observations (rows) at random from \
1240
+ data for the initial centroids.
1241
+
1242
+ * If an array is passed, it should be of shape (n_clusters, n_features)\
1243
+ and gives the initial centers.
1244
+
1245
+ * If a callable is passed, it should take arguments X, n_clusters and a\
1246
+ random state and return an initialization.
1247
+
1248
+ For an example of how to use the different `init` strategy, see the example
1249
+ entitled :ref:`sphx_glr_auto_examples_cluster_plot_kmeans_digits.py`.
1250
+
1251
+ n_init : 'auto' or int, default='auto'
1252
+ Number of times the k-means algorithm is run with different centroid
1253
+ seeds. The final result is the best output of `n_init` consecutive runs
1254
+ in terms of inertia. Several runs are recommended for sparse
1255
+ high-dimensional problems (see :ref:`kmeans_sparse_high_dim`).
1256
+
1257
+ When `n_init='auto'`, the number of runs depends on the value of init:
1258
+ 10 if using `init='random'` or `init` is a callable;
1259
+ 1 if using `init='k-means++'` or `init` is an array-like.
1260
+
1261
+ .. versionadded:: 1.2
1262
+ Added 'auto' option for `n_init`.
1263
+
1264
+ .. versionchanged:: 1.4
1265
+ Default value for `n_init` changed to `'auto'`.
1266
+
1267
+ max_iter : int, default=300
1268
+ Maximum number of iterations of the k-means algorithm for a
1269
+ single run.
1270
+
1271
+ tol : float, default=1e-4
1272
+ Relative tolerance with regards to Frobenius norm of the difference
1273
+ in the cluster centers of two consecutive iterations to declare
1274
+ convergence.
1275
+
1276
+ verbose : int, default=0
1277
+ Verbosity mode.
1278
+
1279
+ random_state : int, RandomState instance or None, default=None
1280
+ Determines random number generation for centroid initialization. Use
1281
+ an int to make the randomness deterministic.
1282
+ See :term:`Glossary <random_state>`.
1283
+
1284
+ copy_x : bool, default=True
1285
+ When pre-computing distances it is more numerically accurate to center
1286
+ the data first. If copy_x is True (default), then the original data is
1287
+ not modified. If False, the original data is modified, and put back
1288
+ before the function returns, but small numerical differences may be
1289
+ introduced by subtracting and then adding the data mean. Note that if
1290
+ the original data is not C-contiguous, a copy will be made even if
1291
+ copy_x is False. If the original data is sparse, but not in CSR format,
1292
+ a copy will be made even if copy_x is False.
1293
+
1294
+ algorithm : {"lloyd", "elkan"}, default="lloyd"
1295
+ K-means algorithm to use. The classical EM-style algorithm is `"lloyd"`.
1296
+ The `"elkan"` variation can be more efficient on some datasets with
1297
+ well-defined clusters, by using the triangle inequality. However it's
1298
+ more memory intensive due to the allocation of an extra array of shape
1299
+ `(n_samples, n_clusters)`.
1300
+
1301
+ .. versionchanged:: 0.18
1302
+ Added Elkan algorithm
1303
+
1304
+ .. versionchanged:: 1.1
1305
+ Renamed "full" to "lloyd", and deprecated "auto" and "full".
1306
+ Changed "auto" to use "lloyd" instead of "elkan".
1307
+
1308
+ Attributes
1309
+ ----------
1310
+ cluster_centers_ : ndarray of shape (n_clusters, n_features)
1311
+ Coordinates of cluster centers. If the algorithm stops before fully
1312
+ converging (see ``tol`` and ``max_iter``), these will not be
1313
+ consistent with ``labels_``.
1314
+
1315
+ labels_ : ndarray of shape (n_samples,)
1316
+ Labels of each point.
1317
+
1318
+ inertia_ : float
1319
+ Sum of squared distances of samples to their closest cluster center,
1320
+ weighted by the sample weights if provided.
1321
+
1322
+ n_iter_ : int
1323
+ Number of iterations run.
1324
+
1325
+ n_features_in_ : int
1326
+ Number of features seen during :term:`fit`.
1327
+
1328
+ .. versionadded:: 0.24
1329
+
1330
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1331
+ Names of features seen during :term:`fit`. Defined only when `X`
1332
+ has feature names that are all strings.
1333
+
1334
+ .. versionadded:: 1.0
1335
+
1336
+ See Also
1337
+ --------
1338
+ MiniBatchKMeans : Alternative online implementation that does incremental
1339
+ updates of the centers positions using mini-batches.
1340
+ For large scale learning (say n_samples > 10k) MiniBatchKMeans is
1341
+ probably much faster than the default batch implementation.
1342
+
1343
+ Notes
1344
+ -----
1345
+ The k-means problem is solved using either Lloyd's or Elkan's algorithm.
1346
+
1347
+ The average complexity is given by O(k n T), where n is the number of
1348
+ samples and T is the number of iteration.
1349
+
1350
+ The worst case complexity is given by O(n^(k+2/p)) with
1351
+ n = n_samples, p = n_features.
1352
+ Refer to :doi:`"How slow is the k-means method?" D. Arthur and S. Vassilvitskii -
1353
+ SoCG2006.<10.1145/1137856.1137880>` for more details.
1354
+
1355
+ In practice, the k-means algorithm is very fast (one of the fastest
1356
+ clustering algorithms available), but it can fall into local minima. That's why
1357
+ it can be useful to restart it several times.
1358
+
1359
+ If the algorithm stops before fully converging (because of ``tol`` or
1360
+ ``max_iter``), ``labels_`` and ``cluster_centers_`` will not be consistent,
1361
+ i.e. the ``cluster_centers_`` will not be the means of the points in each
1362
+ cluster. Also, the estimator will reassign ``labels_`` after the last
1363
+ iteration to make ``labels_`` consistent with ``predict`` on the training
1364
+ set.
1365
+
1366
+ Examples
1367
+ --------
1368
+
1369
+ >>> from sklearn.cluster import KMeans
1370
+ >>> import numpy as np
1371
+ >>> X = np.array([[1, 2], [1, 4], [1, 0],
1372
+ ... [10, 2], [10, 4], [10, 0]])
1373
+ >>> kmeans = KMeans(n_clusters=2, random_state=0, n_init="auto").fit(X)
1374
+ >>> kmeans.labels_
1375
+ array([1, 1, 1, 0, 0, 0], dtype=int32)
1376
+ >>> kmeans.predict([[0, 0], [12, 3]])
1377
+ array([1, 0], dtype=int32)
1378
+ >>> kmeans.cluster_centers_
1379
+ array([[10., 2.],
1380
+ [ 1., 2.]])
1381
+
1382
+ For a more detailed example of K-Means using the iris dataset see
1383
+ :ref:`sphx_glr_auto_examples_cluster_plot_cluster_iris.py`.
1384
+
1385
+ For examples of common problems with K-Means and how to address them see
1386
+ :ref:`sphx_glr_auto_examples_cluster_plot_kmeans_assumptions.py`.
1387
+
1388
+ For an example of how to use K-Means to perform color quantization see
1389
+ :ref:`sphx_glr_auto_examples_cluster_plot_color_quantization.py`.
1390
+
1391
+ For a demonstration of how K-Means can be used to cluster text documents see
1392
+ :ref:`sphx_glr_auto_examples_text_plot_document_clustering.py`.
1393
+
1394
+ For a comparison between K-Means and MiniBatchKMeans refer to example
1395
+ :ref:`sphx_glr_auto_examples_cluster_plot_mini_batch_kmeans.py`.
1396
+ """
1397
+
1398
+ _parameter_constraints: dict = {
1399
+ **_BaseKMeans._parameter_constraints,
1400
+ "copy_x": ["boolean"],
1401
+ "algorithm": [StrOptions({"lloyd", "elkan"})],
1402
+ }
1403
+
1404
+ def __init__(
1405
+ self,
1406
+ n_clusters=8,
1407
+ *,
1408
+ init="k-means++",
1409
+ n_init="auto",
1410
+ max_iter=300,
1411
+ tol=1e-4,
1412
+ verbose=0,
1413
+ random_state=None,
1414
+ copy_x=True,
1415
+ algorithm="lloyd",
1416
+ ):
1417
+ super().__init__(
1418
+ n_clusters=n_clusters,
1419
+ init=init,
1420
+ n_init=n_init,
1421
+ max_iter=max_iter,
1422
+ tol=tol,
1423
+ verbose=verbose,
1424
+ random_state=random_state,
1425
+ )
1426
+
1427
+ self.copy_x = copy_x
1428
+ self.algorithm = algorithm
1429
+
1430
+ def _check_params_vs_input(self, X):
1431
+ super()._check_params_vs_input(X, default_n_init=10)
1432
+
1433
+ self._algorithm = self.algorithm
1434
+ if self._algorithm == "elkan" and self.n_clusters == 1:
1435
+ warnings.warn(
1436
+ (
1437
+ "algorithm='elkan' doesn't make sense for a single "
1438
+ "cluster. Using 'lloyd' instead."
1439
+ ),
1440
+ RuntimeWarning,
1441
+ )
1442
+ self._algorithm = "lloyd"
1443
+
1444
+ def _warn_mkl_vcomp(self, n_active_threads):
1445
+ """Warn when vcomp and mkl are both present"""
1446
+ warnings.warn(
1447
+ "KMeans is known to have a memory leak on Windows "
1448
+ "with MKL, when there are less chunks than available "
1449
+ "threads. You can avoid it by setting the environment"
1450
+ f" variable OMP_NUM_THREADS={n_active_threads}."
1451
+ )
1452
+
1453
+ @_fit_context(prefer_skip_nested_validation=True)
1454
+ def fit(self, X, y=None, sample_weight=None):
1455
+ """Compute k-means clustering.
1456
+
1457
+ Parameters
1458
+ ----------
1459
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1460
+ Training instances to cluster. It must be noted that the data
1461
+ will be converted to C ordering, which will cause a memory
1462
+ copy if the given data is not C-contiguous.
1463
+ If a sparse matrix is passed, a copy will be made if it's not in
1464
+ CSR format.
1465
+
1466
+ y : Ignored
1467
+ Not used, present here for API consistency by convention.
1468
+
1469
+ sample_weight : array-like of shape (n_samples,), default=None
1470
+ The weights for each observation in X. If None, all observations
1471
+ are assigned equal weight. `sample_weight` is not used during
1472
+ initialization if `init` is a callable or a user provided array.
1473
+
1474
+ .. versionadded:: 0.20
1475
+
1476
+ Returns
1477
+ -------
1478
+ self : object
1479
+ Fitted estimator.
1480
+ """
1481
+ X = self._validate_data(
1482
+ X,
1483
+ accept_sparse="csr",
1484
+ dtype=[np.float64, np.float32],
1485
+ order="C",
1486
+ copy=self.copy_x,
1487
+ accept_large_sparse=False,
1488
+ )
1489
+
1490
+ self._check_params_vs_input(X)
1491
+
1492
+ random_state = check_random_state(self.random_state)
1493
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
1494
+ self._n_threads = _openmp_effective_n_threads()
1495
+
1496
+ # Validate init array
1497
+ init = self.init
1498
+ init_is_array_like = _is_arraylike_not_scalar(init)
1499
+ if init_is_array_like:
1500
+ init = check_array(init, dtype=X.dtype, copy=True, order="C")
1501
+ self._validate_center_shape(X, init)
1502
+
1503
+ # subtract of mean of x for more accurate distance computations
1504
+ if not sp.issparse(X):
1505
+ X_mean = X.mean(axis=0)
1506
+ # The copy was already done above
1507
+ X -= X_mean
1508
+
1509
+ if init_is_array_like:
1510
+ init -= X_mean
1511
+
1512
+ # precompute squared norms of data points
1513
+ x_squared_norms = row_norms(X, squared=True)
1514
+
1515
+ if self._algorithm == "elkan":
1516
+ kmeans_single = _kmeans_single_elkan
1517
+ else:
1518
+ kmeans_single = _kmeans_single_lloyd
1519
+ self._check_mkl_vcomp(X, X.shape[0])
1520
+
1521
+ best_inertia, best_labels = None, None
1522
+
1523
+ for i in range(self._n_init):
1524
+ # Initialize centers
1525
+ centers_init = self._init_centroids(
1526
+ X,
1527
+ x_squared_norms=x_squared_norms,
1528
+ init=init,
1529
+ random_state=random_state,
1530
+ sample_weight=sample_weight,
1531
+ )
1532
+ if self.verbose:
1533
+ print("Initialization complete")
1534
+
1535
+ # run a k-means once
1536
+ labels, inertia, centers, n_iter_ = kmeans_single(
1537
+ X,
1538
+ sample_weight,
1539
+ centers_init,
1540
+ max_iter=self.max_iter,
1541
+ verbose=self.verbose,
1542
+ tol=self._tol,
1543
+ n_threads=self._n_threads,
1544
+ )
1545
+
1546
+ # determine if these results are the best so far
1547
+ # we chose a new run if it has a better inertia and the clustering is
1548
+ # different from the best so far (it's possible that the inertia is
1549
+ # slightly better even if the clustering is the same with potentially
1550
+ # permuted labels, due to rounding errors)
1551
+ if best_inertia is None or (
1552
+ inertia < best_inertia
1553
+ and not _is_same_clustering(labels, best_labels, self.n_clusters)
1554
+ ):
1555
+ best_labels = labels
1556
+ best_centers = centers
1557
+ best_inertia = inertia
1558
+ best_n_iter = n_iter_
1559
+
1560
+ if not sp.issparse(X):
1561
+ if not self.copy_x:
1562
+ X += X_mean
1563
+ best_centers += X_mean
1564
+
1565
+ distinct_clusters = len(set(best_labels))
1566
+ if distinct_clusters < self.n_clusters:
1567
+ warnings.warn(
1568
+ "Number of distinct clusters ({}) found smaller than "
1569
+ "n_clusters ({}). Possibly due to duplicate points "
1570
+ "in X.".format(distinct_clusters, self.n_clusters),
1571
+ ConvergenceWarning,
1572
+ stacklevel=2,
1573
+ )
1574
+
1575
+ self.cluster_centers_ = best_centers
1576
+ self._n_features_out = self.cluster_centers_.shape[0]
1577
+ self.labels_ = best_labels
1578
+ self.inertia_ = best_inertia
1579
+ self.n_iter_ = best_n_iter
1580
+ return self
1581
+
1582
+
1583
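The loop above keeps the run with the lowest inertia whose labeling genuinely differs from the current best, so a run that wins only by rounding error on a permuted labeling does not replace an equivalent solution. A minimal standalone sketch of that selection rule, with a simplified permutation check standing in for `_is_same_clustering` (illustrative only, not the scikit-learn internals):

import numpy as np

def same_clustering(labels, best_labels):
    # Simplified stand-in: treat two labelings as identical if one is a
    # consistent relabeling (permutation) of the other.
    if best_labels is None:
        return False
    mapping = {}
    for a, b in zip(labels, best_labels):
        if mapping.setdefault(a, b) != b:
            return False
    return True

best_inertia, best_labels = None, None
runs = [
    (np.array([0, 0, 1]), 5.0),
    (np.array([1, 1, 0]), 4.999),  # same partition, permuted labels: ignored
    (np.array([0, 1, 1]), 4.5),    # genuinely different and better: kept
]
for labels, inertia in runs:
    if best_inertia is None or (
        inertia < best_inertia and not same_clustering(labels, best_labels)
    ):
        best_labels, best_inertia = labels, inertia

print(best_labels, best_inertia)  # [0 1 1] 4.5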
+ def _mini_batch_step(
1584
+ X,
1585
+ sample_weight,
1586
+ centers,
1587
+ centers_new,
1588
+ weight_sums,
1589
+ random_state,
1590
+ random_reassign=False,
1591
+ reassignment_ratio=0.01,
1592
+ verbose=False,
1593
+ n_threads=1,
1594
+ ):
1595
+ """Incremental update of the centers for the Minibatch K-Means algorithm.
1596
+
1597
+ Parameters
1598
+ ----------
1599
+
1600
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
1601
+ The original data array. If sparse, must be in CSR format.
1602
+
1603
+ sample_weight : ndarray of shape (n_samples,)
1607
+ The weights for each observation in `X`.
1608
+
1609
+ centers : ndarray of shape (n_clusters, n_features)
1610
+ The cluster centers before the current iteration
1611
+
1612
+ centers_new : ndarray of shape (n_clusters, n_features)
1613
+ The cluster centers after the current iteration. Modified in-place.
1614
+
1615
+ weight_sums : ndarray of shape (n_clusters,)
1616
+ The vector in which we keep track of the numbers of points in a
1617
+ cluster. This array is modified in place.
1618
+
1619
+ random_state : RandomState instance
1620
+ Determines random number generation for low count centers reassignment.
1621
+ See :term:`Glossary <random_state>`.
1622
+
1623
+ random_reassign : boolean, default=False
1624
+ If True, centers with very low counts are randomly reassigned
1625
+ to observations.
1626
+
1627
+ reassignment_ratio : float, default=0.01
1628
+ Control the fraction of the maximum number of counts for a
1629
+ center to be reassigned. A higher value means that low count
1630
+ centers are more likely to be reassigned, which means that the
1631
+ model will take longer to converge, but should converge in a
1632
+ better clustering.
1633
+
1634
+ verbose : bool, default=False
1635
+ Controls the verbosity.
1636
+
1637
+ n_threads : int, default=1
1638
+ The number of OpenMP threads to use for the computation.
1639
+
1640
+ Returns
1641
+ -------
1642
+ inertia : float
1643
+ Sum of squared distances of samples to their closest cluster center.
1644
+ The inertia is computed after finding the labels and before updating
1645
+ the centers.
1646
+ """
1647
+ # Perform label assignment to nearest centers
1648
+ # For better efficiency, it's better to run _mini_batch_step in a
1649
+ # threadpool_limit context than using _labels_inertia_threadpool_limit here
1650
+ labels, inertia = _labels_inertia(X, sample_weight, centers, n_threads=n_threads)
1651
+
1652
+ # Update centers according to the labels
1653
+ if sp.issparse(X):
1654
+ _minibatch_update_sparse(
1655
+ X, sample_weight, centers, centers_new, weight_sums, labels, n_threads
1656
+ )
1657
+ else:
1658
+ _minibatch_update_dense(
1659
+ X,
1660
+ sample_weight,
1661
+ centers,
1662
+ centers_new,
1663
+ weight_sums,
1664
+ labels,
1665
+ n_threads,
1666
+ )
1667
+
1668
+ # Reassign clusters that have very low weight
1669
+ if random_reassign and reassignment_ratio > 0:
1670
+ to_reassign = weight_sums < reassignment_ratio * weight_sums.max()
1671
+
1672
+ # pick at most .5 * batch_size samples as new centers
1673
+ if to_reassign.sum() > 0.5 * X.shape[0]:
1674
+ indices_dont_reassign = np.argsort(weight_sums)[int(0.5 * X.shape[0]) :]
1675
+ to_reassign[indices_dont_reassign] = False
1676
+ n_reassigns = to_reassign.sum()
1677
+
1678
+ if n_reassigns:
1679
+ # Pick new clusters amongst observations with uniform probability
1680
+ new_centers = random_state.choice(
1681
+ X.shape[0], replace=False, size=n_reassigns
1682
+ )
1683
+ if verbose:
1684
+ print(f"[MiniBatchKMeans] Reassigning {n_reassigns} cluster centers.")
1685
+
1686
+ if sp.issparse(X):
1687
+ assign_rows_csr(
1688
+ X,
1689
+ new_centers.astype(np.intp, copy=False),
1690
+ np.where(to_reassign)[0].astype(np.intp, copy=False),
1691
+ centers_new,
1692
+ )
1693
+ else:
1694
+ centers_new[to_reassign] = X[new_centers]
1695
+
1696
+ # reset counts of reassigned centers, but don't reset them too small
1697
+ # to avoid instant reassignment. This is a pretty dirty hack as it
1698
+ # also modifies the learning rates.
1699
+ weight_sums[to_reassign] = np.min(weight_sums[~to_reassign])
1700
+
1701
+ return inertia
1702
+
1703
+
1704
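In the reassignment branch of `_mini_batch_step` above, a center becomes a candidate for reassignment when its accumulated weight drops below `reassignment_ratio` times the heaviest center's weight, and at most half a batch worth of centers is reassigned in one step. A small numeric sketch of that selection (the weights are made up for illustration):

import numpy as np

weight_sums = np.array([120.0, 3.0, 95.0, 0.9, 110.0])
reassignment_ratio = 0.01
batch_size = 6

# candidates: centers lighter than 1% of the heaviest center (threshold 1.2 here)
to_reassign = weight_sums < reassignment_ratio * weight_sums.max()
print(to_reassign)             # [False False False  True False]

# never reassign more than half a batch worth of centers in a single step
if to_reassign.sum() > 0.5 * batch_size:
    keep = np.argsort(weight_sums)[int(0.5 * batch_size):]
    to_reassign[keep] = False

print(int(to_reassign.sum()))  # 1 center would receive a fresh position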
+ class MiniBatchKMeans(_BaseKMeans):
1705
+ """
1706
+ Mini-Batch K-Means clustering.
1707
+
1708
+ Read more in the :ref:`User Guide <mini_batch_kmeans>`.
1709
+
1710
+ Parameters
1711
+ ----------
1712
+
1713
+ n_clusters : int, default=8
1714
+ The number of clusters to form as well as the number of
1715
+ centroids to generate.
1716
+
1717
+ init : {'k-means++', 'random'}, callable or array-like of shape \
1718
+ (n_clusters, n_features), default='k-means++'
1719
+ Method for initialization:
1720
+
1721
+ 'k-means++' : selects initial cluster centroids using sampling based on
1722
+ an empirical probability distribution of the points' contribution to the
1723
+ overall inertia. This technique speeds up convergence. The algorithm
1724
+ implemented is "greedy k-means++". It differs from the vanilla k-means++
1725
+ by making several trials at each sampling step and choosing the best centroid
1726
+ among them.
1727
+
1728
+ 'random': choose `n_clusters` observations (rows) at random from data
1729
+ for the initial centroids.
1730
+
1731
+ If an array is passed, it should be of shape (n_clusters, n_features)
1732
+ and gives the initial centers.
1733
+
1734
+ If a callable is passed, it should take arguments X, n_clusters and a
1735
+ random state and return an initialization.
1736
+
1737
+ max_iter : int, default=100
1738
+ Maximum number of iterations over the complete dataset before
1739
+ stopping independently of any early stopping criterion heuristics.
1740
+
1741
+ batch_size : int, default=1024
1742
+ Size of the mini batches.
1743
+ For faster computations, you can set the ``batch_size`` greater than
1744
+ 256 * number of cores to enable parallelism on all cores.
1745
+
1746
+ .. versionchanged:: 1.0
1747
+ `batch_size` default changed from 100 to 1024.
1748
+
1749
+ verbose : int, default=0
1750
+ Verbosity mode.
1751
+
1752
+ compute_labels : bool, default=True
1753
+ Compute label assignment and inertia for the complete dataset
1754
+ once the minibatch optimization has converged in fit.
1755
+
1756
+ random_state : int, RandomState instance or None, default=None
1757
+ Determines random number generation for centroid initialization and
1758
+ random reassignment. Use an int to make the randomness deterministic.
1759
+ See :term:`Glossary <random_state>`.
1760
+
1761
+ tol : float, default=0.0
1762
+ Control early stopping based on the relative center changes as
1763
+ measured by a smoothed, variance-normalized mean of the squared
1764
+ center position changes. This early stopping heuristic is
1765
+ closer to the one used for the batch variant of the algorithm
1766
+ but induces a slight computational and memory overhead over the
1767
+ inertia heuristic.
1768
+
1769
+ To disable convergence detection based on normalized center
1770
+ change, set tol to 0.0 (default).
1771
+
1772
+ max_no_improvement : int, default=10
1773
+ Control early stopping based on the consecutive number of mini
1774
+ batches that does not yield an improvement on the smoothed inertia.
1775
+
1776
+ To disable convergence detection based on inertia, set
1777
+ max_no_improvement to None.
1778
+
1779
+ init_size : int, default=None
1780
+ Number of samples to randomly sample for speeding up the
1781
+ initialization (sometimes at the expense of accuracy): the
1782
+ algorithm is initialized by running a batch KMeans on a
1783
+ random subset of the data. This needs to be larger than n_clusters.
1784
+
1785
+ If `None`, `init_size = 3 * batch_size` is used, unless
1786
+ `3 * batch_size < n_clusters`, in which case `init_size = 3 * n_clusters`.
1787
+
1788
+ n_init : 'auto' or int, default="auto"
1789
+ Number of random initializations that are tried.
1790
+ In contrast to KMeans, the algorithm is only run once, using the best of
1791
+ the `n_init` initializations as measured by inertia. Several runs are
1792
+ recommended for sparse high-dimensional problems (see
1793
+ :ref:`kmeans_sparse_high_dim`).
1794
+
1795
+ When `n_init='auto'`, the number of runs depends on the value of init:
1796
+ 3 if using `init='random'` or `init` is a callable;
1797
+ 1 if using `init='k-means++'` or `init` is an array-like.
1798
+
1799
+ .. versionadded:: 1.2
1800
+ Added 'auto' option for `n_init`.
1801
+
1802
+ .. versionchanged:: 1.4
1803
+ Default value for `n_init` changed to `'auto'`.
1804
+
1805
+ reassignment_ratio : float, default=0.01
1806
+ Control the fraction of the maximum number of counts for a center to
1807
+ be reassigned. A higher value means that low count centers are more
1808
+ easily reassigned, which means that the model will take longer to
1809
+ converge, but should converge in a better clustering. However, too high
1810
+ a value may cause convergence issues, especially with a small batch
1811
+ size.
1812
+
1813
+ Attributes
1814
+ ----------
1815
+
1816
+ cluster_centers_ : ndarray of shape (n_clusters, n_features)
1817
+ Coordinates of cluster centers.
1818
+
1819
+ labels_ : ndarray of shape (n_samples,)
1820
+ Labels of each point (if compute_labels is set to True).
1821
+
1822
+ inertia_ : float
1823
+ The value of the inertia criterion associated with the chosen
1824
+ partition if compute_labels is set to True. If compute_labels is set to
1825
+ False, it's an approximation of the inertia based on an exponentially
1826
+ weighted average of the batch inertiae.
1827
+ The inertia is defined as the sum of square distances of samples to
1828
+ their cluster center, weighted by the sample weights if provided.
1829
+
1830
+ n_iter_ : int
1831
+ Number of iterations over the full dataset.
1832
+
1833
+ n_steps_ : int
1834
+ Number of minibatches processed.
1835
+
1836
+ .. versionadded:: 1.0
1837
+
1838
+ n_features_in_ : int
1839
+ Number of features seen during :term:`fit`.
1840
+
1841
+ .. versionadded:: 0.24
1842
+
1843
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1844
+ Names of features seen during :term:`fit`. Defined only when `X`
1845
+ has feature names that are all strings.
1846
+
1847
+ .. versionadded:: 1.0
1848
+
1849
+ See Also
1850
+ --------
1851
+ KMeans : The classic implementation of the clustering method based on the
1852
+ Lloyd's algorithm. It consumes the whole set of input data at each
1853
+ iteration.
1854
+
1855
+ Notes
1856
+ -----
1857
+ See https://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
1858
+
1859
+ When there are too few points in the dataset, some centers may be
1860
+ duplicated, which means that the number of requested clusters and
1861
+ the number of returned clusters will not
1862
+ always match. One solution is to set `reassignment_ratio=0`, which
1863
+ prevents reassignments of clusters that are too small.
1864
+
1865
+ Examples
1866
+ --------
1867
+ >>> from sklearn.cluster import MiniBatchKMeans
1868
+ >>> import numpy as np
1869
+ >>> X = np.array([[1, 2], [1, 4], [1, 0],
1870
+ ... [4, 2], [4, 0], [4, 4],
1871
+ ... [4, 5], [0, 1], [2, 2],
1872
+ ... [3, 2], [5, 5], [1, -1]])
1873
+ >>> # manually fit on batches
1874
+ >>> kmeans = MiniBatchKMeans(n_clusters=2,
1875
+ ... random_state=0,
1876
+ ... batch_size=6,
1877
+ ... n_init="auto")
1878
+ >>> kmeans = kmeans.partial_fit(X[0:6,:])
1879
+ >>> kmeans = kmeans.partial_fit(X[6:12,:])
1880
+ >>> kmeans.cluster_centers_
1881
+ array([[3.375, 3. ],
1882
+ [0.75 , 0.5 ]])
1883
+ >>> kmeans.predict([[0, 0], [4, 4]])
1884
+ array([1, 0], dtype=int32)
1885
+ >>> # fit on the whole data
1886
+ >>> kmeans = MiniBatchKMeans(n_clusters=2,
1887
+ ... random_state=0,
1888
+ ... batch_size=6,
1889
+ ... max_iter=10,
1890
+ ... n_init="auto").fit(X)
1891
+ >>> kmeans.cluster_centers_
1892
+ array([[3.55102041, 2.48979592],
1893
+ [1.06896552, 1. ]])
1894
+ >>> kmeans.predict([[0, 0], [4, 4]])
1895
+ array([1, 0], dtype=int32)
1896
+ """
1897
+
1898
+ _parameter_constraints: dict = {
1899
+ **_BaseKMeans._parameter_constraints,
1900
+ "batch_size": [Interval(Integral, 1, None, closed="left")],
1901
+ "compute_labels": ["boolean"],
1902
+ "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None],
1903
+ "init_size": [Interval(Integral, 1, None, closed="left"), None],
1904
+ "reassignment_ratio": [Interval(Real, 0, None, closed="left")],
1905
+ }
1906
+
1907
+ def __init__(
1908
+ self,
1909
+ n_clusters=8,
1910
+ *,
1911
+ init="k-means++",
1912
+ max_iter=100,
1913
+ batch_size=1024,
1914
+ verbose=0,
1915
+ compute_labels=True,
1916
+ random_state=None,
1917
+ tol=0.0,
1918
+ max_no_improvement=10,
1919
+ init_size=None,
1920
+ n_init="auto",
1921
+ reassignment_ratio=0.01,
1922
+ ):
1923
+ super().__init__(
1924
+ n_clusters=n_clusters,
1925
+ init=init,
1926
+ max_iter=max_iter,
1927
+ verbose=verbose,
1928
+ random_state=random_state,
1929
+ tol=tol,
1930
+ n_init=n_init,
1931
+ )
1932
+
1933
+ self.max_no_improvement = max_no_improvement
1934
+ self.batch_size = batch_size
1935
+ self.compute_labels = compute_labels
1936
+ self.init_size = init_size
1937
+ self.reassignment_ratio = reassignment_ratio
1938
+
1939
+ def _check_params_vs_input(self, X):
1940
+ super()._check_params_vs_input(X, default_n_init=3)
1941
+
1942
+ self._batch_size = min(self.batch_size, X.shape[0])
1943
+
1944
+ # init_size
1945
+ self._init_size = self.init_size
1946
+ if self._init_size is None:
1947
+ self._init_size = 3 * self._batch_size
1948
+ if self._init_size < self.n_clusters:
1949
+ self._init_size = 3 * self.n_clusters
1950
+ elif self._init_size < self.n_clusters:
1951
+ warnings.warn(
1952
+ (
1953
+ f"init_size={self._init_size} should be larger than "
1954
+ f"n_clusters={self.n_clusters}. Setting it to "
1955
+ "min(3*n_clusters, n_samples)"
1956
+ ),
1957
+ RuntimeWarning,
1958
+ stacklevel=2,
1959
+ )
1960
+ self._init_size = 3 * self.n_clusters
1961
+ self._init_size = min(self._init_size, X.shape[0])
1962
+
1963
+ # reassignment_ratio
1964
+ if self.reassignment_ratio < 0:
1965
+ raise ValueError(
1966
+ "reassignment_ratio should be >= 0, got "
1967
+ f"{self.reassignment_ratio} instead."
1968
+ )
1969
+
1970
+ def _warn_mkl_vcomp(self, n_active_threads):
1971
+ """Warn when vcomp and mkl are both present"""
1972
+ warnings.warn(
1973
+ "MiniBatchKMeans is known to have a memory leak on "
1974
+ "Windows with MKL, when there are less chunks than "
1975
+ "available threads. You can prevent it by setting "
1976
+ f"batch_size >= {self._n_threads * CHUNK_SIZE} or by "
1977
+ "setting the environment variable "
1978
+ f"OMP_NUM_THREADS={n_active_threads}"
1979
+ )
1980
+
1981
+ def _mini_batch_convergence(
1982
+ self, step, n_steps, n_samples, centers_squared_diff, batch_inertia
1983
+ ):
1984
+ """Helper function to encapsulate the early stopping logic"""
1985
+ # Normalize inertia to be able to compare values when
1986
+ # batch_size changes
1987
+ batch_inertia /= self._batch_size
1988
+
1989
+ # count steps starting from 1 for user friendly verbose mode.
1990
+ step = step + 1
1991
+
1992
+ # Ignore first iteration because it's inertia from initialization.
1993
+ if step == 1:
1994
+ if self.verbose:
1995
+ print(
1996
+ f"Minibatch step {step}/{n_steps}: mean batch "
1997
+ f"inertia: {batch_inertia}"
1998
+ )
1999
+ return False
2000
+
2001
+ # Compute an Exponentially Weighted Average of the inertia to
2002
+ # monitor the convergence while discarding minibatch-local stochastic
2003
+ # variability: https://en.wikipedia.org/wiki/Moving_average
2004
+ if self._ewa_inertia is None:
2005
+ self._ewa_inertia = batch_inertia
2006
+ else:
2007
+ alpha = self._batch_size * 2.0 / (n_samples + 1)
2008
+ alpha = min(alpha, 1)
2009
+ self._ewa_inertia = self._ewa_inertia * (1 - alpha) + batch_inertia * alpha
2010
+
2011
+ # Log progress to be able to monitor convergence
2012
+ if self.verbose:
2013
+ print(
2014
+ f"Minibatch step {step}/{n_steps}: mean batch inertia: "
2015
+ f"{batch_inertia}, ewa inertia: {self._ewa_inertia}"
2016
+ )
2017
+
2018
+ # Early stopping based on absolute tolerance on squared change of
2019
+ # centers position
2020
+ if self._tol > 0.0 and centers_squared_diff <= self._tol:
2021
+ if self.verbose:
2022
+ print(f"Converged (small centers change) at step {step}/{n_steps}")
2023
+ return True
2024
+
2025
+ # Early stopping heuristic due to lack of improvement on smoothed
2026
+ # inertia
2027
+ if self._ewa_inertia_min is None or self._ewa_inertia < self._ewa_inertia_min:
2028
+ self._no_improvement = 0
2029
+ self._ewa_inertia_min = self._ewa_inertia
2030
+ else:
2031
+ self._no_improvement += 1
2032
+
2033
+ if (
2034
+ self.max_no_improvement is not None
2035
+ and self._no_improvement >= self.max_no_improvement
2036
+ ):
2037
+ if self.verbose:
2038
+ print(
2039
+ "Converged (lack of improvement in inertia) at step "
2040
+ f"{step}/{n_steps}"
2041
+ )
2042
+ return True
2043
+
2044
+ return False
2045
+
2046
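The smoothed inertia used above is an exponentially weighted average whose weight grows with the batch size relative to the dataset, so larger mini-batches move the running estimate faster. A tiny sketch of the same update rule (the inertia values are illustrative):

batch_size, n_samples = 1024, 100_000
alpha = min(batch_size * 2.0 / (n_samples + 1), 1.0)

ewa_inertia = None
for batch_inertia in [12.0, 10.5, 10.1, 9.9]:
    if ewa_inertia is None:
        # the first batch only seeds the average
        ewa_inertia = batch_inertia
    else:
        ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha

print(round(alpha, 5), round(ewa_inertia, 3))  # 0.02048 11.889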
+ def _random_reassign(self):
2047
+ """Check if a random reassignment needs to be done.
2048
+
2049
+ Do random reassignments each time 10 * n_clusters samples have been
2050
+ processed.
2051
+
2052
+ If there are empty clusters we always want to reassign.
2053
+ """
2054
+ self._n_since_last_reassign += self._batch_size
2055
+ if (self._counts == 0).any() or self._n_since_last_reassign >= (
2056
+ 10 * self.n_clusters
2057
+ ):
2058
+ self._n_since_last_reassign = 0
2059
+ return True
2060
+ return False
2061
+
2062
+ @_fit_context(prefer_skip_nested_validation=True)
2063
+ def fit(self, X, y=None, sample_weight=None):
2064
+ """Compute the centroids on X by chunking it into mini-batches.
2065
+
2066
+ Parameters
2067
+ ----------
2068
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2069
+ Training instances to cluster. It must be noted that the data
2070
+ will be converted to C ordering, which will cause a memory copy
2071
+ if the given data is not C-contiguous.
2072
+ If a sparse matrix is passed, a copy will be made if it's not in
2073
+ CSR format.
2074
+
2075
+ y : Ignored
2076
+ Not used, present here for API consistency by convention.
2077
+
2078
+ sample_weight : array-like of shape (n_samples,), default=None
2079
+ The weights for each observation in X. If None, all observations
2080
+ are assigned equal weight. `sample_weight` is not used during
2081
+ initialization if `init` is a callable or a user provided array.
2082
+
2083
+ .. versionadded:: 0.20
2084
+
2085
+ Returns
2086
+ -------
2087
+ self : object
2088
+ Fitted estimator.
2089
+ """
2090
+ X = self._validate_data(
2091
+ X,
2092
+ accept_sparse="csr",
2093
+ dtype=[np.float64, np.float32],
2094
+ order="C",
2095
+ accept_large_sparse=False,
2096
+ )
2097
+
2098
+ self._check_params_vs_input(X)
2099
+ random_state = check_random_state(self.random_state)
2100
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
2101
+ self._n_threads = _openmp_effective_n_threads()
2102
+ n_samples, n_features = X.shape
2103
+
2104
+ # Validate init array
2105
+ init = self.init
2106
+ if _is_arraylike_not_scalar(init):
2107
+ init = check_array(init, dtype=X.dtype, copy=True, order="C")
2108
+ self._validate_center_shape(X, init)
2109
+
2110
+ self._check_mkl_vcomp(X, self._batch_size)
2111
+
2112
+ # precompute squared norms of data points
2113
+ x_squared_norms = row_norms(X, squared=True)
2114
+
2115
+ # Validation set for the init
2116
+ validation_indices = random_state.randint(0, n_samples, self._init_size)
2117
+ X_valid = X[validation_indices]
2118
+ sample_weight_valid = sample_weight[validation_indices]
2119
+
2120
+ # perform several inits with random subsets
2121
+ best_inertia = None
2122
+ for init_idx in range(self._n_init):
2123
+ if self.verbose:
2124
+ print(f"Init {init_idx + 1}/{self._n_init} with method {init}")
2125
+
2126
+ # Initialize the centers using only a fraction of the data as we
2127
+ # expect n_samples to be very large when using MiniBatchKMeans.
2128
+ cluster_centers = self._init_centroids(
2129
+ X,
2130
+ x_squared_norms=x_squared_norms,
2131
+ init=init,
2132
+ random_state=random_state,
2133
+ init_size=self._init_size,
2134
+ sample_weight=sample_weight,
2135
+ )
2136
+
2137
+ # Compute inertia on a validation set.
2138
+ _, inertia = _labels_inertia_threadpool_limit(
2139
+ X_valid,
2140
+ sample_weight_valid,
2141
+ cluster_centers,
2142
+ n_threads=self._n_threads,
2143
+ )
2144
+
2145
+ if self.verbose:
2146
+ print(f"Inertia for init {init_idx + 1}/{self._n_init}: {inertia}")
2147
+ if best_inertia is None or inertia < best_inertia:
2148
+ init_centers = cluster_centers
2149
+ best_inertia = inertia
2150
+
2151
+ centers = init_centers
2152
+ centers_new = np.empty_like(centers)
2153
+
2154
+ # Initialize counts
2155
+ self._counts = np.zeros(self.n_clusters, dtype=X.dtype)
2156
+
2157
+ # Attributes to monitor the convergence
2158
+ self._ewa_inertia = None
2159
+ self._ewa_inertia_min = None
2160
+ self._no_improvement = 0
2161
+
2162
+ # Initialize number of samples seen since last reassignment
2163
+ self._n_since_last_reassign = 0
2164
+
2165
+ n_steps = (self.max_iter * n_samples) // self._batch_size
2166
+
2167
+ with threadpool_limits(limits=1, user_api="blas"):
2168
+ # Perform the iterative optimization until convergence
2169
+ for i in range(n_steps):
2170
+ # Sample a minibatch from the full dataset
2171
+ minibatch_indices = random_state.randint(0, n_samples, self._batch_size)
2172
+
2173
+ # Perform the actual update step on the minibatch data
2174
+ batch_inertia = _mini_batch_step(
2175
+ X=X[minibatch_indices],
2176
+ sample_weight=sample_weight[minibatch_indices],
2177
+ centers=centers,
2178
+ centers_new=centers_new,
2179
+ weight_sums=self._counts,
2180
+ random_state=random_state,
2181
+ random_reassign=self._random_reassign(),
2182
+ reassignment_ratio=self.reassignment_ratio,
2183
+ verbose=self.verbose,
2184
+ n_threads=self._n_threads,
2185
+ )
2186
+
2187
+ if self._tol > 0.0:
2188
+ centers_squared_diff = np.sum((centers_new - centers) ** 2)
2189
+ else:
2190
+ centers_squared_diff = 0
2191
+
2192
+ centers, centers_new = centers_new, centers
2193
+
2194
+ # Monitor convergence and do early stopping if necessary
2195
+ if self._mini_batch_convergence(
2196
+ i, n_steps, n_samples, centers_squared_diff, batch_inertia
2197
+ ):
2198
+ break
2199
+
2200
+ self.cluster_centers_ = centers
2201
+ self._n_features_out = self.cluster_centers_.shape[0]
2202
+
2203
+ self.n_steps_ = i + 1
2204
+ self.n_iter_ = int(np.ceil(((i + 1) * self._batch_size) / n_samples))
2205
+
2206
+ if self.compute_labels:
2207
+ self.labels_, self.inertia_ = _labels_inertia_threadpool_limit(
2208
+ X,
2209
+ sample_weight,
2210
+ self.cluster_centers_,
2211
+ n_threads=self._n_threads,
2212
+ )
2213
+ else:
2214
+ self.inertia_ = self._ewa_inertia * n_samples
2215
+
2216
+ return self
2217
+
2218
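`fit` above budgets `n_steps = (max_iter * n_samples) // batch_size` random mini-batches and, after stopping (possibly early) at step `i`, reports `n_iter_` as the equivalent number of full passes over the data. A quick arithmetic sketch with made-up sizes:

import numpy as np

n_samples, batch_size, max_iter = 10_000, 1024, 100

n_steps = (max_iter * n_samples) // batch_size
print(n_steps)                 # 976 mini-batch steps at most

# suppose the early-stopping heuristics triggered after 130 steps (i == 129)
i = 129
n_iter = int(np.ceil(((i + 1) * batch_size) / n_samples))
print(n_iter)                  # 14 equivalent passes over the data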
+ @_fit_context(prefer_skip_nested_validation=True)
2219
+ def partial_fit(self, X, y=None, sample_weight=None):
2220
+ """Update k means estimate on a single mini-batch X.
2221
+
2222
+ Parameters
2223
+ ----------
2224
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2225
+ Training instances to cluster. It must be noted that the data
2226
+ will be converted to C ordering, which will cause a memory copy
2227
+ if the given data is not C-contiguous.
2228
+ If a sparse matrix is passed, a copy will be made if it's not in
2229
+ CSR format.
2230
+
2231
+ y : Ignored
2232
+ Not used, present here for API consistency by convention.
2233
+
2234
+ sample_weight : array-like of shape (n_samples,), default=None
2235
+ The weights for each observation in X. If None, all observations
2236
+ are assigned equal weight. `sample_weight` is not used during
2237
+ initialization if `init` is a callable or a user provided array.
2238
+
2239
+ Returns
2240
+ -------
2241
+ self : object
2242
+ Return updated estimator.
2243
+ """
2244
+ has_centers = hasattr(self, "cluster_centers_")
2245
+
2246
+ X = self._validate_data(
2247
+ X,
2248
+ accept_sparse="csr",
2249
+ dtype=[np.float64, np.float32],
2250
+ order="C",
2251
+ accept_large_sparse=False,
2252
+ reset=not has_centers,
2253
+ )
2254
+
2255
+ self._random_state = getattr(
2256
+ self, "_random_state", check_random_state(self.random_state)
2257
+ )
2258
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
2259
+ self.n_steps_ = getattr(self, "n_steps_", 0)
2260
+
2261
+ # precompute squared norms of data points
2262
+ x_squared_norms = row_norms(X, squared=True)
2263
+
2264
+ if not has_centers:
2265
+ # this instance has not been fitted yet (fit or partial_fit)
2266
+ self._check_params_vs_input(X)
2267
+ self._n_threads = _openmp_effective_n_threads()
2268
+
2269
+ # Validate init array
2270
+ init = self.init
2271
+ if _is_arraylike_not_scalar(init):
2272
+ init = check_array(init, dtype=X.dtype, copy=True, order="C")
2273
+ self._validate_center_shape(X, init)
2274
+
2275
+ self._check_mkl_vcomp(X, X.shape[0])
2276
+
2277
+ # initialize the cluster centers
2278
+ self.cluster_centers_ = self._init_centroids(
2279
+ X,
2280
+ x_squared_norms=x_squared_norms,
2281
+ init=init,
2282
+ random_state=self._random_state,
2283
+ init_size=self._init_size,
2284
+ sample_weight=sample_weight,
2285
+ )
2286
+
2287
+ # Initialize counts
2288
+ self._counts = np.zeros(self.n_clusters, dtype=X.dtype)
2289
+
2290
+ # Initialize number of samples seen since last reassignment
2291
+ self._n_since_last_reassign = 0
2292
+
2293
+ with threadpool_limits(limits=1, user_api="blas"):
2294
+ _mini_batch_step(
2295
+ X,
2296
+ sample_weight=sample_weight,
2297
+ centers=self.cluster_centers_,
2298
+ centers_new=self.cluster_centers_,
2299
+ weight_sums=self._counts,
2300
+ random_state=self._random_state,
2301
+ random_reassign=self._random_reassign(),
2302
+ reassignment_ratio=self.reassignment_ratio,
2303
+ verbose=self.verbose,
2304
+ n_threads=self._n_threads,
2305
+ )
2306
+
2307
+ if self.compute_labels:
2308
+ self.labels_, self.inertia_ = _labels_inertia_threadpool_limit(
2309
+ X,
2310
+ sample_weight,
2311
+ self.cluster_centers_,
2312
+ n_threads=self._n_threads,
2313
+ )
2314
+
2315
+ self.n_steps_ += 1
2316
+ self._n_features_out = self.cluster_centers_.shape[0]
2317
+
2318
+ return self
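`partial_fit` keeps its own running state (`cluster_centers_`, the per-center weights, and `n_steps_`), so a stream of chunks can be fed one at a time without ever materializing the full dataset. A brief usage sketch, assuming scikit-learn is installed (the chunking scheme is illustrative):

import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.RandomState(0)
mbk = MiniBatchKMeans(n_clusters=3, random_state=0, n_init="auto")

# feed the data in chunks, e.g. as it arrives from disk or over the network
for _ in range(20):
    chunk = rng.rand(256, 2)
    mbk.partial_fit(chunk)

print(mbk.cluster_centers_.shape)  # (3, 2)
print(mbk.n_steps_)                # 20, one step per partial_fit call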
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_mean_shift.py ADDED
@@ -0,0 +1,575 @@
1
+ """Mean shift clustering algorithm.
2
+
3
+ Mean shift clustering aims to discover *blobs* in a smooth density of
4
+ samples. It is a centroid based algorithm, which works by updating candidates
5
+ for centroids to be the mean of the points within a given region. These
6
+ candidates are then filtered in a post-processing stage to eliminate
7
+ near-duplicates to form the final set of centroids.
8
+
9
+ Seeding is performed using a binning technique for scalability.
10
+ """
11
+
12
+ # Authors: Conrad Lee <[email protected]>
13
+ # Alexandre Gramfort <[email protected]>
14
+ # Gael Varoquaux <[email protected]>
15
+ # Martino Sorbaro <[email protected]>
16
+
17
+ import warnings
18
+ from collections import defaultdict
19
+ from numbers import Integral, Real
20
+
21
+ import numpy as np
22
+
23
+ from .._config import config_context
24
+ from ..base import BaseEstimator, ClusterMixin, _fit_context
25
+ from ..metrics.pairwise import pairwise_distances_argmin
26
+ from ..neighbors import NearestNeighbors
27
+ from ..utils import check_array, check_random_state, gen_batches
28
+ from ..utils._param_validation import Interval, validate_params
29
+ from ..utils.parallel import Parallel, delayed
30
+ from ..utils.validation import check_is_fitted
31
+
32
+
33
+ @validate_params(
34
+ {
35
+ "X": ["array-like"],
36
+ "quantile": [Interval(Real, 0, 1, closed="both")],
37
+ "n_samples": [Interval(Integral, 1, None, closed="left"), None],
38
+ "random_state": ["random_state"],
39
+ "n_jobs": [Integral, None],
40
+ },
41
+ prefer_skip_nested_validation=True,
42
+ )
43
+ def estimate_bandwidth(X, *, quantile=0.3, n_samples=None, random_state=0, n_jobs=None):
44
+ """Estimate the bandwidth to use with the mean-shift algorithm.
45
+
46
+ This function takes time at least quadratic in `n_samples`. For large
47
+ datasets, it is wise to subsample by setting `n_samples`. Alternatively,
48
+ the parameter `bandwidth` can be set to a small value without estimating
49
+ it.
50
+
51
+ Parameters
52
+ ----------
53
+ X : array-like of shape (n_samples, n_features)
54
+ Input points.
55
+
56
+ quantile : float, default=0.3
57
+ Should be in the range [0, 1];
58
+ 0.5 means that the median of all pairwise distances is used.
59
+
60
+ n_samples : int, default=None
61
+ The number of samples to use. If not given, all samples are used.
62
+
63
+ random_state : int, RandomState instance, default=0
64
+ The generator used to randomly select the samples from input points
65
+ for bandwidth estimation. Use an int to make the randomness
66
+ deterministic.
67
+ See :term:`Glossary <random_state>`.
68
+
69
+ n_jobs : int, default=None
70
+ The number of parallel jobs to run for neighbors search.
71
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
72
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
73
+ for more details.
74
+
75
+ Returns
76
+ -------
77
+ bandwidth : float
78
+ The bandwidth parameter.
79
+
80
+ Examples
81
+ --------
82
+ >>> import numpy as np
83
+ >>> from sklearn.cluster import estimate_bandwidth
84
+ >>> X = np.array([[1, 1], [2, 1], [1, 0],
85
+ ... [4, 7], [3, 5], [3, 6]])
86
+ >>> estimate_bandwidth(X, quantile=0.5)
87
+ 1.61...
88
+ """
89
+ X = check_array(X)
90
+
91
+ random_state = check_random_state(random_state)
92
+ if n_samples is not None:
93
+ idx = random_state.permutation(X.shape[0])[:n_samples]
94
+ X = X[idx]
95
+ n_neighbors = int(X.shape[0] * quantile)
96
+ if n_neighbors < 1: # cannot fit NearestNeighbors with n_neighbors = 0
97
+ n_neighbors = 1
98
+ nbrs = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=n_jobs)
99
+ nbrs.fit(X)
100
+
101
+ bandwidth = 0.0
102
+ for batch in gen_batches(len(X), 500):
103
+ d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
104
+ bandwidth += np.max(d, axis=1).sum()
105
+
106
+ return bandwidth / X.shape[0]
107
+
108
+
109
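`estimate_bandwidth` above averages, over all (sub)sampled points, the distance to their `int(n_samples * quantile)`-th nearest neighbor, where each point counts as its own first neighbor. An equivalent brute-force sketch on the docstring's toy data (quadratic, for illustration only):

import numpy as np

X = np.array([[1., 1.], [2., 1.], [1., 0.],
              [4., 7.], [3., 5.], [3., 6.]])
quantile = 0.5
k = max(1, int(X.shape[0] * quantile))  # 3 neighbors, self included

# full pairwise distance matrix; the diagonal holds each point's zero self-distance
d = np.sqrt(((X[:, None, :] - X[None, :, :]) ** 2).sum(axis=-1))
kth_neighbor = np.sort(d, axis=1)[:, k - 1]

print(round(kth_neighbor.mean(), 2))    # 1.62, matching the 1.61... doctest above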
+ # separate function for each seed's iterative loop
110
+ def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
111
+ # For each seed, climb gradient until convergence or max_iter
112
+ bandwidth = nbrs.get_params()["radius"]
113
+ stop_thresh = 1e-3 * bandwidth # when mean has converged
114
+ completed_iterations = 0
115
+ while True:
116
+ # Find mean of points within bandwidth
117
+ i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth, return_distance=False)[0]
118
+ points_within = X[i_nbrs]
119
+ if len(points_within) == 0:
120
+ break # Depending on seeding strategy this condition may occur
121
+ my_old_mean = my_mean # save the old mean
122
+ my_mean = np.mean(points_within, axis=0)
123
+ # If converged or at max_iter, adds the cluster
124
+ if (
125
+ np.linalg.norm(my_mean - my_old_mean) < stop_thresh
126
+ or completed_iterations == max_iter
127
+ ):
128
+ break
129
+ completed_iterations += 1
130
+ return tuple(my_mean), len(points_within), completed_iterations
131
+
132
+
133
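Each seed above repeatedly moves to the mean of all points within `bandwidth` of its current position and stops once the shift falls below `1e-3 * bandwidth` or `max_iter` is reached. A compact sketch of one such climb with a brute-force neighborhood query (illustrative, not the module's implementation):

import numpy as np

X = np.array([[1., 1.], [2., 1.], [1., 0.],
              [4., 7.], [3., 5.], [3., 6.]])
bandwidth, max_iter = 2.0, 300
mean = X[0].copy()                       # start the climb from the first point

for _ in range(max_iter):
    within = X[np.linalg.norm(X - mean, axis=1) <= bandwidth]
    new_mean = within.mean(axis=0)
    if np.linalg.norm(new_mean - mean) < 1e-3 * bandwidth:
        break
    mean = new_mean

print(np.round(mean, 2))                 # [1.33 0.67], the lower-left blob's center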
+ @validate_params(
134
+ {"X": ["array-like"]},
135
+ prefer_skip_nested_validation=False,
136
+ )
137
+ def mean_shift(
138
+ X,
139
+ *,
140
+ bandwidth=None,
141
+ seeds=None,
142
+ bin_seeding=False,
143
+ min_bin_freq=1,
144
+ cluster_all=True,
145
+ max_iter=300,
146
+ n_jobs=None,
147
+ ):
148
+ """Perform mean shift clustering of data using a flat kernel.
149
+
150
+ Read more in the :ref:`User Guide <mean_shift>`.
151
+
152
+ Parameters
153
+ ----------
154
+
155
+ X : array-like of shape (n_samples, n_features)
156
+ Input data.
157
+
158
+ bandwidth : float, default=None
159
+ Kernel bandwidth. If not None, must be in the range [0, +inf).
160
+
161
+ If None, the bandwidth is determined using a heuristic based on
162
+ the median of all pairwise distances. This will take quadratic time in
163
+ the number of samples. The sklearn.cluster.estimate_bandwidth function
164
+ can be used to do this more efficiently.
165
+
166
+ seeds : array-like of shape (n_seeds, n_features) or None
167
+ Points used as initial kernel locations. If None and bin_seeding=False,
168
+ each data point is used as a seed. If None and bin_seeding=True,
169
+ see bin_seeding.
170
+
171
+ bin_seeding : bool, default=False
172
+ If true, initial kernel locations are not locations of all
173
+ points, but rather the location of the discretized version of
174
+ points, where points are binned onto a grid whose coarseness
175
+ corresponds to the bandwidth. Setting this option to True will speed
176
+ up the algorithm because fewer seeds will be initialized.
177
+ Ignored if seeds argument is not None.
178
+
179
+ min_bin_freq : int, default=1
180
+ To speed up the algorithm, accept only those bins with at least
181
+ min_bin_freq points as seeds.
182
+
183
+ cluster_all : bool, default=True
184
+ If true, then all points are clustered, even those orphans that are
185
+ not within any kernel. Orphans are assigned to the nearest kernel.
186
+ If false, then orphans are given cluster label -1.
187
+
188
+ max_iter : int, default=300
189
+ Maximum number of iterations, per seed point before the clustering
190
+ operation terminates (for that seed point), if it has not converged yet.
191
+
192
+ n_jobs : int, default=None
193
+ The number of jobs to use for the computation. The following tasks benefit
194
+ from the parallelization:
195
+
196
+ - The search of nearest neighbors for bandwidth estimation and label
197
+ assignments. See the details in the docstring of the
198
+ ``NearestNeighbors`` class.
199
+ - Hill-climbing optimization for all seeds.
200
+
201
+ See :term:`Glossary <n_jobs>` for more details.
202
+
203
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
204
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
205
+ for more details.
206
+
207
+ .. versionadded:: 0.17
208
+ Parallel Execution using *n_jobs*.
209
+
210
+ Returns
211
+ -------
212
+
213
+ cluster_centers : ndarray of shape (n_clusters, n_features)
214
+ Coordinates of cluster centers.
215
+
216
+ labels : ndarray of shape (n_samples,)
217
+ Cluster labels for each point.
218
+
219
+ Notes
220
+ -----
221
+ For an example, see :ref:`examples/cluster/plot_mean_shift.py
222
+ <sphx_glr_auto_examples_cluster_plot_mean_shift.py>`.
223
+
224
+ Examples
225
+ --------
226
+ >>> import numpy as np
227
+ >>> from sklearn.cluster import mean_shift
228
+ >>> X = np.array([[1, 1], [2, 1], [1, 0],
229
+ ... [4, 7], [3, 5], [3, 6]])
230
+ >>> cluster_centers, labels = mean_shift(X, bandwidth=2)
231
+ >>> cluster_centers
232
+ array([[3.33..., 6. ],
233
+ [1.33..., 0.66...]])
234
+ >>> labels
235
+ array([1, 1, 1, 0, 0, 0])
236
+ """
237
+ model = MeanShift(
238
+ bandwidth=bandwidth,
239
+ seeds=seeds,
240
+ min_bin_freq=min_bin_freq,
241
+ bin_seeding=bin_seeding,
242
+ cluster_all=cluster_all,
243
+ n_jobs=n_jobs,
244
+ max_iter=max_iter,
245
+ ).fit(X)
246
+ return model.cluster_centers_, model.labels_
247
+
248
+
249
+ def get_bin_seeds(X, bin_size, min_bin_freq=1):
250
+ """Find seeds for mean_shift.
251
+
252
+ Finds seeds by first binning data onto a grid whose lines are
253
+ spaced bin_size apart, and then choosing those bins with at least
254
+ min_bin_freq points.
255
+
256
+ Parameters
257
+ ----------
258
+
259
+ X : array-like of shape (n_samples, n_features)
260
+ Input points, the same points that will be used in mean_shift.
261
+
262
+ bin_size : float
263
+ Controls the coarseness of the binning. Smaller values lead
264
+ to more seeding (which is computationally more expensive). If you're
265
+ not sure how to set this, set it to the value of the bandwidth used
266
+ in clustering.mean_shift.
267
+
268
+ min_bin_freq : int, default=1
269
+ Only bins with at least min_bin_freq will be selected as seeds.
270
+ Raising this value decreases the number of seeds found, which
271
+ makes mean_shift computationally cheaper.
272
+
273
+ Returns
274
+ -------
275
+ bin_seeds : array-like of shape (n_samples, n_features)
276
+ Points used as initial kernel positions in clustering.mean_shift.
277
+ """
278
+ if bin_size == 0:
279
+ return X
280
+
281
+ # Bin points
282
+ bin_sizes = defaultdict(int)
283
+ for point in X:
284
+ binned_point = np.round(point / bin_size)
285
+ bin_sizes[tuple(binned_point)] += 1
286
+
287
+ # Select only those bins as seeds which have enough members
288
+ bin_seeds = np.array(
289
+ [point for point, freq in bin_sizes.items() if freq >= min_bin_freq],
290
+ dtype=np.float32,
291
+ )
292
+ if len(bin_seeds) == len(X):
293
+ warnings.warn(
294
+ "Binning data failed with provided bin_size=%f, using data points as seeds."
295
+ % bin_size
296
+ )
297
+ return X
298
+ bin_seeds = bin_seeds * bin_size
299
+ return bin_seeds
300
+
301
+
302
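`get_bin_seeds` above snaps every point onto a grid with spacing `bin_size`, counts the points per grid cell, and keeps the cells holding at least `min_bin_freq` points as seeds. A short sketch of that binning on made-up coordinates:

import numpy as np
from collections import defaultdict

X = np.array([[1.0, 1.1], [0.9, 1.0], [1.2, 0.8],
              [4.1, 7.0], [3.9, 6.8]])
bin_size, min_bin_freq = 1.0, 2

counts = defaultdict(int)
for point in X:
    counts[tuple(np.round(point / bin_size))] += 1   # grid cell of this point

seeds = np.array([cell for cell, freq in counts.items()
                  if freq >= min_bin_freq]) * bin_size
print(seeds)   # two seeds: the cells around (1, 1) and (4, 7)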
+ class MeanShift(ClusterMixin, BaseEstimator):
303
+ """Mean shift clustering using a flat kernel.
304
+
305
+ Mean shift clustering aims to discover "blobs" in a smooth density of
306
+ samples. It is a centroid-based algorithm, which works by updating
307
+ candidates for centroids to be the mean of the points within a given
308
+ region. These candidates are then filtered in a post-processing stage to
309
+ eliminate near-duplicates to form the final set of centroids.
310
+
311
+ Seeding is performed using a binning technique for scalability.
312
+
313
+ Read more in the :ref:`User Guide <mean_shift>`.
314
+
315
+ Parameters
316
+ ----------
317
+ bandwidth : float, default=None
318
+ Bandwidth used in the flat kernel.
319
+
320
+ If not given, the bandwidth is estimated using
321
+ sklearn.cluster.estimate_bandwidth; see the documentation for that
322
+ function for hints on scalability (see also the Notes, below).
323
+
324
+ seeds : array-like of shape (n_samples, n_features), default=None
325
+ Seeds used to initialize kernels. If not set,
326
+ the seeds are calculated by clustering.get_bin_seeds
327
+ with bandwidth as the grid size and default values for
328
+ other parameters.
329
+
330
+ bin_seeding : bool, default=False
331
+ If true, initial kernel locations are not locations of all
332
+ points, but rather the location of the discretized version of
333
+ points, where points are binned onto a grid whose coarseness
334
+ corresponds to the bandwidth. Setting this option to True will speed
335
+ up the algorithm because fewer seeds will be initialized.
336
+ The default value is False.
337
+ Ignored if seeds argument is not None.
338
+
339
+ min_bin_freq : int, default=1
340
+ To speed up the algorithm, accept only those bins with at least
341
+ min_bin_freq points as seeds.
342
+
343
+ cluster_all : bool, default=True
344
+ If true, then all points are clustered, even those orphans that are
345
+ not within any kernel. Orphans are assigned to the nearest kernel.
346
+ If false, then orphans are given cluster label -1.
347
+
348
+ n_jobs : int, default=None
349
+ The number of jobs to use for the computation. The following tasks benefit
350
+ from the parallelization:
351
+
352
+ - The search of nearest neighbors for bandwidth estimation and label
353
+ assignments. See the details in the docstring of the
354
+ ``NearestNeighbors`` class.
355
+ - Hill-climbing optimization for all seeds.
356
+
357
+ See :term:`Glossary <n_jobs>` for more details.
358
+
359
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
360
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
361
+ for more details.
362
+
363
+ max_iter : int, default=300
364
+ Maximum number of iterations, per seed point before the clustering
365
+ operation terminates (for that seed point), if it has not converged yet.
366
+
367
+ .. versionadded:: 0.22
368
+
369
+ Attributes
370
+ ----------
371
+ cluster_centers_ : ndarray of shape (n_clusters, n_features)
372
+ Coordinates of cluster centers.
373
+
374
+ labels_ : ndarray of shape (n_samples,)
375
+ Labels of each point.
376
+
377
+ n_iter_ : int
378
+ Maximum number of iterations performed on each seed.
379
+
380
+ .. versionadded:: 0.22
381
+
382
+ n_features_in_ : int
383
+ Number of features seen during :term:`fit`.
384
+
385
+ .. versionadded:: 0.24
386
+
387
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
388
+ Names of features seen during :term:`fit`. Defined only when `X`
389
+ has feature names that are all strings.
390
+
391
+ .. versionadded:: 1.0
392
+
393
+ See Also
394
+ --------
395
+ KMeans : K-Means clustering.
396
+
397
+ Notes
398
+ -----
399
+
400
+ Scalability:
401
+
402
+ Because this implementation uses a flat kernel and
403
+ a Ball Tree to look up members of each kernel, the complexity will tend
404
+ towards O(T*n*log(n)) in lower dimensions, with n the number of samples
405
+ and T the number of points. In higher dimensions the complexity will
406
+ tend towards O(T*n^2).
407
+
408
+ Scalability can be boosted by using fewer seeds, for example by using
409
+ a higher value of min_bin_freq in the get_bin_seeds function.
410
+
411
+ Note that the estimate_bandwidth function is much less scalable than the
412
+ mean shift algorithm and will be the bottleneck if it is used.
413
+
414
+ References
415
+ ----------
416
+
417
+ Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
418
+ feature space analysis". IEEE Transactions on Pattern Analysis and
419
+ Machine Intelligence. 2002. pp. 603-619.
420
+
421
+ Examples
422
+ --------
423
+ >>> from sklearn.cluster import MeanShift
424
+ >>> import numpy as np
425
+ >>> X = np.array([[1, 1], [2, 1], [1, 0],
426
+ ... [4, 7], [3, 5], [3, 6]])
427
+ >>> clustering = MeanShift(bandwidth=2).fit(X)
428
+ >>> clustering.labels_
429
+ array([1, 1, 1, 0, 0, 0])
430
+ >>> clustering.predict([[0, 0], [5, 5]])
431
+ array([1, 0])
432
+ >>> clustering
433
+ MeanShift(bandwidth=2)
434
+ """
435
+
436
+ _parameter_constraints: dict = {
437
+ "bandwidth": [Interval(Real, 0, None, closed="neither"), None],
438
+ "seeds": ["array-like", None],
439
+ "bin_seeding": ["boolean"],
440
+ "min_bin_freq": [Interval(Integral, 1, None, closed="left")],
441
+ "cluster_all": ["boolean"],
442
+ "n_jobs": [Integral, None],
443
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
444
+ }
445
+
446
+ def __init__(
447
+ self,
448
+ *,
449
+ bandwidth=None,
450
+ seeds=None,
451
+ bin_seeding=False,
452
+ min_bin_freq=1,
453
+ cluster_all=True,
454
+ n_jobs=None,
455
+ max_iter=300,
456
+ ):
457
+ self.bandwidth = bandwidth
458
+ self.seeds = seeds
459
+ self.bin_seeding = bin_seeding
460
+ self.cluster_all = cluster_all
461
+ self.min_bin_freq = min_bin_freq
462
+ self.n_jobs = n_jobs
463
+ self.max_iter = max_iter
464
+
465
+ @_fit_context(prefer_skip_nested_validation=True)
466
+ def fit(self, X, y=None):
467
+ """Perform clustering.
468
+
469
+ Parameters
470
+ ----------
471
+ X : array-like of shape (n_samples, n_features)
472
+ Samples to cluster.
473
+
474
+ y : Ignored
475
+ Not used, present for API consistency by convention.
476
+
477
+ Returns
478
+ -------
479
+ self : object
480
+ Fitted instance.
481
+ """
482
+ X = self._validate_data(X)
483
+ bandwidth = self.bandwidth
484
+ if bandwidth is None:
485
+ bandwidth = estimate_bandwidth(X, n_jobs=self.n_jobs)
486
+
487
+ seeds = self.seeds
488
+ if seeds is None:
489
+ if self.bin_seeding:
490
+ seeds = get_bin_seeds(X, bandwidth, self.min_bin_freq)
491
+ else:
492
+ seeds = X
493
+ n_samples, n_features = X.shape
494
+ center_intensity_dict = {}
495
+
496
+ # We use n_jobs=1 because this will be used in nested calls under
497
+ # parallel calls to _mean_shift_single_seed so there is no need for
498
+ # further parallelism.
499
+ nbrs = NearestNeighbors(radius=bandwidth, n_jobs=1).fit(X)
500
+
501
+ # execute iterations on all seeds in parallel
502
+ all_res = Parallel(n_jobs=self.n_jobs)(
503
+ delayed(_mean_shift_single_seed)(seed, X, nbrs, self.max_iter)
504
+ for seed in seeds
505
+ )
506
+ # copy results in a dictionary
507
+ for i in range(len(seeds)):
508
+ if all_res[i][1]: # i.e. len(points_within) > 0
509
+ center_intensity_dict[all_res[i][0]] = all_res[i][1]
510
+
511
+ self.n_iter_ = max([x[2] for x in all_res])
512
+
513
+ if not center_intensity_dict:
514
+ # nothing near seeds
515
+ raise ValueError(
516
+ "No point was within bandwidth=%f of any seed. Try a different seeding"
517
+ " strategy or increase the bandwidth."
518
+ % bandwidth
519
+ )
520
+
521
+ # POST PROCESSING: remove near duplicate points
522
+ # If the distance between two kernels is less than the bandwidth,
523
+ # then we have to remove one because it is a duplicate. Remove the
524
+ # one with fewer points.
525
+
526
+ sorted_by_intensity = sorted(
527
+ center_intensity_dict.items(),
528
+ key=lambda tup: (tup[1], tup[0]),
529
+ reverse=True,
530
+ )
531
+ sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
532
+ unique = np.ones(len(sorted_centers), dtype=bool)
533
+ nbrs = NearestNeighbors(radius=bandwidth, n_jobs=self.n_jobs).fit(
534
+ sorted_centers
535
+ )
536
+ for i, center in enumerate(sorted_centers):
537
+ if unique[i]:
538
+ neighbor_idxs = nbrs.radius_neighbors([center], return_distance=False)[
539
+ 0
540
+ ]
541
+ unique[neighbor_idxs] = 0
542
+ unique[i] = 1 # leave the current point as unique
543
+ cluster_centers = sorted_centers[unique]
544
+
545
+ # ASSIGN LABELS: a point belongs to the cluster that it is closest to
546
+ nbrs = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs).fit(cluster_centers)
547
+ labels = np.zeros(n_samples, dtype=int)
548
+ distances, idxs = nbrs.kneighbors(X)
549
+ if self.cluster_all:
550
+ labels = idxs.flatten()
551
+ else:
552
+ labels.fill(-1)
553
+ bool_selector = distances.flatten() <= bandwidth
554
+ labels[bool_selector] = idxs.flatten()[bool_selector]
555
+
556
+ self.cluster_centers_, self.labels_ = cluster_centers, labels
557
+ return self
558
+
559
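The post-processing step above keeps the best supported kernel in each bandwidth-sized neighborhood: candidate centers are visited in decreasing intensity and any other center within `bandwidth` of a kept one is discarded. A brute-force sketch of that suppression (illustrative values):

import numpy as np

# already sorted by decreasing intensity, as in the fit method above
centers = np.array([[1.3, 0.7], [1.4, 0.8], [3.3, 6.0]])
bandwidth = 2.0

unique = np.ones(len(centers), dtype=bool)
for i, center in enumerate(centers):
    if unique[i]:
        close = np.linalg.norm(centers - center, axis=1) <= bandwidth
        unique[close] = False        # drop every center in this neighborhood...
        unique[i] = True             # ...then keep the current, most intense one

print(centers[unique])               # the duplicate at (1.4, 0.8) is removed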
+ def predict(self, X):
560
+ """Predict the closest cluster each sample in X belongs to.
561
+
562
+ Parameters
563
+ ----------
564
+ X : array-like of shape (n_samples, n_features)
565
+ New data to predict.
566
+
567
+ Returns
568
+ -------
569
+ labels : ndarray of shape (n_samples,)
570
+ Index of the cluster each sample belongs to.
571
+ """
572
+ check_is_fitted(self)
573
+ X = self._validate_data(X, reset=False)
574
+ with config_context(assume_finite=True):
575
+ return pairwise_distances_argmin(X, self.cluster_centers_)
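`predict` above simply assigns each new sample to its nearest stored center. A tiny NumPy equivalence sketch, assuming a fitted `cluster_centers_` like the one in the class docstring (illustrative only):

import numpy as np

cluster_centers = np.array([[3.33, 6.0], [1.33, 0.67]])
X_new = np.array([[0.0, 0.0], [5.0, 5.0]])

d = np.linalg.norm(X_new[:, None, :] - cluster_centers[None, :, :], axis=-1)
print(d.argmin(axis=1))   # [1 0], matching the predict example in the docstring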
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_optics.py ADDED
@@ -0,0 +1,1199 @@
1
+ """Ordering Points To Identify the Clustering Structure (OPTICS)
2
+
3
+ These routines execute the OPTICS algorithm, and implement various
4
+ cluster extraction methods of the ordered list.
5
+
6
+ Authors: Shane Grigsby <[email protected]>
7
+ Adrin Jalali <[email protected]>
8
+ Erich Schubert <[email protected]>
9
+ Hanmin Qin <[email protected]>
10
+ License: BSD 3 clause
11
+ """
12
+
13
+ import warnings
14
+ from numbers import Integral, Real
15
+
16
+ import numpy as np
17
+ from scipy.sparse import SparseEfficiencyWarning, issparse
18
+
19
+ from ..base import BaseEstimator, ClusterMixin, _fit_context
20
+ from ..exceptions import DataConversionWarning
21
+ from ..metrics import pairwise_distances
22
+ from ..metrics.pairwise import _VALID_METRICS, PAIRWISE_BOOLEAN_FUNCTIONS
23
+ from ..neighbors import NearestNeighbors
24
+ from ..utils import gen_batches, get_chunk_n_rows
25
+ from ..utils._param_validation import (
26
+ HasMethods,
27
+ Interval,
28
+ RealNotInt,
29
+ StrOptions,
30
+ validate_params,
31
+ )
32
+ from ..utils.validation import check_memory
33
+
34
+
35
+ class OPTICS(ClusterMixin, BaseEstimator):
36
+ """Estimate clustering structure from vector array.
37
+
38
+ OPTICS (Ordering Points To Identify the Clustering Structure), closely
39
+ related to DBSCAN, finds core samples of high density and expands clusters
40
+ from them [1]_. Unlike DBSCAN, it keeps the cluster hierarchy for a variable
41
+ neighborhood radius. Better suited for usage on large datasets than the
42
+ current sklearn implementation of DBSCAN.
43
+
44
+ Clusters are then extracted using a DBSCAN-like method
45
+ (cluster_method = 'dbscan') or an automatic
46
+ technique proposed in [1]_ (cluster_method = 'xi').
47
+
48
+ This implementation deviates from the original OPTICS by first performing
49
+ k-nearest-neighborhood searches on all points to identify core sizes, then
50
+ computing only the distances to unprocessed points when constructing the
51
+ cluster order. Note that we do not employ a heap to manage the expansion
52
+ candidates, so the time complexity will be O(n^2).
53
+
54
+ Read more in the :ref:`User Guide <optics>`.
55
+
56
+ Parameters
57
+ ----------
58
+ min_samples : int > 1 or float between 0 and 1, default=5
59
+ The number of samples in a neighborhood for a point to be considered as
60
+ a core point. Also, up and down steep regions can't have more than
61
+ ``min_samples`` consecutive non-steep points. Expressed as an absolute
62
+ number or a fraction of the number of samples (rounded to be at least
63
+ 2).
64
+
65
+ max_eps : float, default=np.inf
66
+ The maximum distance between two samples for one to be considered as
67
+ in the neighborhood of the other. Default value of ``np.inf`` will
68
+ identify clusters across all scales; reducing ``max_eps`` will result
69
+ in shorter run times.
70
+
71
+ metric : str or callable, default='minkowski'
72
+ Metric to use for distance computation. Any metric from scikit-learn
73
+ or scipy.spatial.distance can be used.
74
+
75
+ If metric is a callable function, it is called on each
76
+ pair of instances (rows) and the resulting value recorded. The callable
77
+ should take two arrays as input and return one value indicating the
78
+ distance between them. This works for Scipy's metrics, but is less
79
+ efficient than passing the metric name as a string. If metric is
80
+ "precomputed", `X` is assumed to be a distance matrix and must be
81
+ square.
82
+
83
+ Valid values for metric are:
84
+
85
+ - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
86
+ 'manhattan']
87
+
88
+ - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
89
+ 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
90
+ 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
91
+ 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
92
+ 'yule']
93
+
94
+ Sparse matrices are only supported by scikit-learn metrics.
95
+ See the documentation for scipy.spatial.distance for details on these
96
+ metrics.
97
+
98
+ .. note::
99
+ `'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11.
100
+
101
+ p : float, default=2
102
+ Parameter for the Minkowski metric from
103
+ :class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is
104
+ equivalent to using manhattan_distance (l1), and euclidean_distance
105
+ (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
106
+
107
+ metric_params : dict, default=None
108
+ Additional keyword arguments for the metric function.
109
+
110
+ cluster_method : str, default='xi'
111
+ The extraction method used to extract clusters using the calculated
112
+ reachability and ordering. Possible values are "xi" and "dbscan".
113
+
114
+ eps : float, default=None
115
+ The maximum distance between two samples for one to be considered as
116
+ in the neighborhood of the other. By default it assumes the same value
117
+ as ``max_eps``.
118
+ Used only when ``cluster_method='dbscan'``.
119
+
120
+ xi : float between 0 and 1, default=0.05
121
+ Determines the minimum steepness on the reachability plot that
122
+ constitutes a cluster boundary. For example, an upwards point in the
123
+ reachability plot is defined by the ratio from one point to its
124
+ successor being at most 1-xi.
125
+ Used only when ``cluster_method='xi'``.
126
+
127
+ predecessor_correction : bool, default=True
128
+ Correct clusters according to the predecessors calculated by OPTICS
129
+ [2]_. This parameter has minimal effect on most datasets.
130
+ Used only when ``cluster_method='xi'``.
131
+
132
+ min_cluster_size : int > 1 or float between 0 and 1, default=None
133
+ Minimum number of samples in an OPTICS cluster, expressed as an
134
+ absolute number or a fraction of the number of samples (rounded to be
135
+ at least 2). If ``None``, the value of ``min_samples`` is used instead.
136
+ Used only when ``cluster_method='xi'``.
137
+
138
+ algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
139
+ Algorithm used to compute the nearest neighbors:
140
+
141
+ - 'ball_tree' will use :class:`~sklearn.neighbors.BallTree`.
142
+ - 'kd_tree' will use :class:`~sklearn.neighbors.KDTree`.
143
+ - 'brute' will use a brute-force search.
144
+ - 'auto' (default) will attempt to decide the most appropriate
145
+ algorithm based on the values passed to :meth:`fit` method.
146
+
147
+ Note: fitting on sparse input will override the setting of
148
+ this parameter, using brute force.
149
+
150
+ leaf_size : int, default=30
151
+ Leaf size passed to :class:`~sklearn.neighbors.BallTree` or
152
+ :class:`~sklearn.neighbors.KDTree`. This can affect the speed of the
153
+ construction and query, as well as the memory required to store the
154
+ tree. The optimal value depends on the nature of the problem.
155
+
156
+ memory : str or object with the joblib.Memory interface, default=None
157
+ Used to cache the output of the computation of the tree.
158
+ By default, no caching is done. If a string is given, it is the
159
+ path to the caching directory.
160
+
161
+ n_jobs : int, default=None
162
+ The number of parallel jobs to run for neighbors search.
163
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
164
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
165
+ for more details.
166
+
167
+ Attributes
168
+ ----------
169
+ labels_ : ndarray of shape (n_samples,)
170
+ Cluster labels for each point in the dataset given to fit().
171
+ Noisy samples and points which are not included in a leaf cluster
172
+ of ``cluster_hierarchy_`` are labeled as -1.
173
+
174
+ reachability_ : ndarray of shape (n_samples,)
175
+ Reachability distances per sample, indexed by object order. Use
176
+ ``clust.reachability_[clust.ordering_]`` to access in cluster order.
177
+
178
+ ordering_ : ndarray of shape (n_samples,)
179
+ The cluster ordered list of sample indices.
180
+
181
+ core_distances_ : ndarray of shape (n_samples,)
182
+ Distance at which each sample becomes a core point, indexed by object
183
+ order. Points which will never be core have a distance of inf. Use
184
+ ``clust.core_distances_[clust.ordering_]`` to access in cluster order.
185
+
186
+ predecessor_ : ndarray of shape (n_samples,)
187
+ Point that a sample was reached from, indexed by object order.
188
+ Seed points have a predecessor of -1.
189
+
190
+ cluster_hierarchy_ : ndarray of shape (n_clusters, 2)
191
+ The list of clusters in the form of ``[start, end]`` in each row, with
192
+ all indices inclusive. The clusters are ordered according to
193
+ ``(end, -start)`` (ascending) so that larger clusters encompassing
194
+ smaller clusters come after those smaller ones. Since ``labels_`` does
195
+ not reflect the hierarchy, usually
196
+ ``len(cluster_hierarchy_) > np.unique(optics.labels_)``. Please also
197
+ note that these indices are of the ``ordering_``, i.e.
198
+ ``X[ordering_][start:end + 1]`` form a cluster.
199
+ Only available when ``cluster_method='xi'``.
200
+
201
+ n_features_in_ : int
202
+ Number of features seen during :term:`fit`.
203
+
204
+ .. versionadded:: 0.24
205
+
206
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
207
+ Names of features seen during :term:`fit`. Defined only when `X`
208
+ has feature names that are all strings.
209
+
210
+ .. versionadded:: 1.0
211
+
212
+ See Also
213
+ --------
214
+ DBSCAN : A similar clustering for a specified neighborhood radius (eps).
215
+ Our implementation is optimized for runtime.
216
+
217
+ References
218
+ ----------
219
+ .. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
220
+ and Jörg Sander. "OPTICS: ordering points to identify the clustering
221
+ structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
222
+
223
+ .. [2] Schubert, Erich, Michael Gertz.
224
+ "Improving the Cluster Structure Extracted from OPTICS Plots." Proc. of
225
+ the Conference "Lernen, Wissen, Daten, Analysen" (LWDA) (2018): 318-329.
226
+
227
+ Examples
228
+ --------
229
+ >>> from sklearn.cluster import OPTICS
230
+ >>> import numpy as np
231
+ >>> X = np.array([[1, 2], [2, 5], [3, 6],
232
+ ... [8, 7], [8, 8], [7, 3]])
233
+ >>> clustering = OPTICS(min_samples=2).fit(X)
234
+ >>> clustering.labels_
235
+ array([0, 0, 0, 1, 1, 1])
236
+
237
+ For a more detailed example see
238
+ :ref:`sphx_glr_auto_examples_cluster_plot_optics.py`.
239
+ """
240
+
241
+ _parameter_constraints: dict = {
242
+ "min_samples": [
243
+ Interval(Integral, 2, None, closed="left"),
244
+ Interval(RealNotInt, 0, 1, closed="both"),
245
+ ],
246
+ "max_eps": [Interval(Real, 0, None, closed="both")],
247
+ "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
248
+ "p": [Interval(Real, 1, None, closed="left")],
249
+ "metric_params": [dict, None],
250
+ "cluster_method": [StrOptions({"dbscan", "xi"})],
251
+ "eps": [Interval(Real, 0, None, closed="both"), None],
252
+ "xi": [Interval(Real, 0, 1, closed="both")],
253
+ "predecessor_correction": ["boolean"],
254
+ "min_cluster_size": [
255
+ Interval(Integral, 2, None, closed="left"),
256
+ Interval(RealNotInt, 0, 1, closed="right"),
257
+ None,
258
+ ],
259
+ "algorithm": [StrOptions({"auto", "brute", "ball_tree", "kd_tree"})],
260
+ "leaf_size": [Interval(Integral, 1, None, closed="left")],
261
+ "memory": [str, HasMethods("cache"), None],
262
+ "n_jobs": [Integral, None],
263
+ }
264
+
265
+ def __init__(
266
+ self,
267
+ *,
268
+ min_samples=5,
269
+ max_eps=np.inf,
270
+ metric="minkowski",
271
+ p=2,
272
+ metric_params=None,
273
+ cluster_method="xi",
274
+ eps=None,
275
+ xi=0.05,
276
+ predecessor_correction=True,
277
+ min_cluster_size=None,
278
+ algorithm="auto",
279
+ leaf_size=30,
280
+ memory=None,
281
+ n_jobs=None,
282
+ ):
283
+ self.max_eps = max_eps
284
+ self.min_samples = min_samples
285
+ self.min_cluster_size = min_cluster_size
286
+ self.algorithm = algorithm
287
+ self.metric = metric
288
+ self.metric_params = metric_params
289
+ self.p = p
290
+ self.leaf_size = leaf_size
291
+ self.cluster_method = cluster_method
292
+ self.eps = eps
293
+ self.xi = xi
294
+ self.predecessor_correction = predecessor_correction
295
+ self.memory = memory
296
+ self.n_jobs = n_jobs
297
+
298
+ @_fit_context(
299
+ # Optics.metric is not validated yet
300
+ prefer_skip_nested_validation=False
301
+ )
302
+ def fit(self, X, y=None):
303
+ """Perform OPTICS clustering.
304
+
305
+ Extracts an ordered list of points and reachability distances, and
306
+ performs initial clustering using ``max_eps`` distance specified at
307
+ OPTICS object instantiation.
308
+
309
+ Parameters
310
+ ----------
311
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features), or \
312
+ (n_samples, n_samples) if metric='precomputed'
313
+ A feature array, or array of distances between samples if
314
+ metric='precomputed'. If a sparse matrix is provided, it will be
315
+ converted into CSR format.
316
+
317
+ y : Ignored
318
+ Not used, present for API consistency by convention.
319
+
320
+ Returns
321
+ -------
322
+ self : object
323
+ Returns a fitted instance of self.
324
+ """
325
+ dtype = bool if self.metric in PAIRWISE_BOOLEAN_FUNCTIONS else float
326
+ if dtype == bool and X.dtype != bool:
327
+ msg = (
328
+ "Data will be converted to boolean for"
329
+ f" metric {self.metric}, to avoid this warning,"
330
+ " you may convert the data prior to calling fit."
331
+ )
332
+ warnings.warn(msg, DataConversionWarning)
333
+
334
+ X = self._validate_data(X, dtype=dtype, accept_sparse="csr")
335
+ if self.metric == "precomputed" and issparse(X):
336
+ with warnings.catch_warnings():
337
+ warnings.simplefilter("ignore", SparseEfficiencyWarning)
338
+ # Set each diagonal to an explicit value so each point is its
339
+ # own neighbor
340
+ X.setdiag(X.diagonal())
341
+ memory = check_memory(self.memory)
342
+
343
+ (
344
+ self.ordering_,
345
+ self.core_distances_,
346
+ self.reachability_,
347
+ self.predecessor_,
348
+ ) = memory.cache(compute_optics_graph)(
349
+ X=X,
350
+ min_samples=self.min_samples,
351
+ algorithm=self.algorithm,
352
+ leaf_size=self.leaf_size,
353
+ metric=self.metric,
354
+ metric_params=self.metric_params,
355
+ p=self.p,
356
+ n_jobs=self.n_jobs,
357
+ max_eps=self.max_eps,
358
+ )
359
+
360
+ # Extract clusters from the calculated orders and reachability
361
+ if self.cluster_method == "xi":
362
+ labels_, clusters_ = cluster_optics_xi(
363
+ reachability=self.reachability_,
364
+ predecessor=self.predecessor_,
365
+ ordering=self.ordering_,
366
+ min_samples=self.min_samples,
367
+ min_cluster_size=self.min_cluster_size,
368
+ xi=self.xi,
369
+ predecessor_correction=self.predecessor_correction,
370
+ )
371
+ self.cluster_hierarchy_ = clusters_
372
+ elif self.cluster_method == "dbscan":
373
+ if self.eps is None:
374
+ eps = self.max_eps
375
+ else:
376
+ eps = self.eps
377
+
378
+ if eps > self.max_eps:
379
+ raise ValueError(
380
+ "Specify an epsilon smaller than %s. Got %s." % (self.max_eps, eps)
381
+ )
382
+
383
+ labels_ = cluster_optics_dbscan(
384
+ reachability=self.reachability_,
385
+ core_distances=self.core_distances_,
386
+ ordering=self.ordering_,
387
+ eps=eps,
388
+ )
389
+
390
+ self.labels_ = labels_
391
+ return self
392
+
393
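# A minimal usage sketch (not part of the module source): extracting
# DBSCAN-style labels at a fixed radius instead of the default xi method.
# The data mirrors the doctest in the class docstring above.
#
#   import numpy as np
#   from sklearn.cluster import OPTICS
#   X = np.array([[1, 2], [2, 5], [3, 6], [8, 7], [8, 8], [7, 3]])
#   clust = OPTICS(min_samples=2, cluster_method="dbscan", eps=2).fit(X)
#   clust.labels_  # samples whose reachability exceeds eps become noise (-1)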
+
394
+ def _validate_size(size, n_samples, param_name):
395
+ if size > n_samples:
396
+ raise ValueError(
397
+ "%s must be no greater than the number of samples (%d). Got %d"
398
+ % (param_name, n_samples, size)
399
+ )
400
+
401
+
402
+ # OPTICS helper functions
403
+ def _compute_core_distances_(X, neighbors, min_samples, working_memory):
404
+ """Compute the k-th nearest neighbor of each sample.
405
+
406
+ Equivalent to neighbors.kneighbors(X, self.min_samples)[0][:, -1]
407
+ but with more memory efficiency.
408
+
409
+ Parameters
410
+ ----------
411
+ X : array-like of shape (n_samples, n_features)
412
+ The data.
413
+ neighbors : NearestNeighbors instance
414
+ The fitted nearest neighbors estimator.
415
+ working_memory : int, default=None
416
+ The sought maximum memory for temporary distance matrix chunks.
417
+ When None (default), the value of
418
+ ``sklearn.get_config()['working_memory']`` is used.
419
+
420
+ Returns
421
+ -------
422
+ core_distances : ndarray of shape (n_samples,)
423
+ Distance at which each sample becomes a core point.
424
+ Points which will never be core have a distance of inf.
425
+ """
426
+ n_samples = X.shape[0]
427
+ core_distances = np.empty(n_samples)
428
+ core_distances.fill(np.nan)
429
+
430
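# The k-NN query below is chunked so that the temporary distance and index
# buffers returned by ``kneighbors`` stay within the working_memory budget;
# the 16 bytes per neighbor presumably account for one float64 distance plus
# one int64 index.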
+ chunk_n_rows = get_chunk_n_rows(
431
+ row_bytes=16 * min_samples, max_n_rows=n_samples, working_memory=working_memory
432
+ )
433
+ slices = gen_batches(n_samples, chunk_n_rows)
434
+ for sl in slices:
435
+ core_distances[sl] = neighbors.kneighbors(X[sl], min_samples)[0][:, -1]
436
+ return core_distances
437
+
438
+
439
+ @validate_params(
440
+ {
441
+ "X": [np.ndarray, "sparse matrix"],
442
+ "min_samples": [
443
+ Interval(Integral, 2, None, closed="left"),
444
+ Interval(RealNotInt, 0, 1, closed="both"),
445
+ ],
446
+ "max_eps": [Interval(Real, 0, None, closed="both")],
447
+ "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
448
+ "p": [Interval(Real, 0, None, closed="right"), None],
449
+ "metric_params": [dict, None],
450
+ "algorithm": [StrOptions({"auto", "brute", "ball_tree", "kd_tree"})],
451
+ "leaf_size": [Interval(Integral, 1, None, closed="left")],
452
+ "n_jobs": [Integral, None],
453
+ },
454
+ prefer_skip_nested_validation=False, # metric is not validated yet
455
+ )
456
+ def compute_optics_graph(
457
+ X, *, min_samples, max_eps, metric, p, metric_params, algorithm, leaf_size, n_jobs
458
+ ):
459
+ """Compute the OPTICS reachability graph.
460
+
461
+ Read more in the :ref:`User Guide <optics>`.
462
+
463
+ Parameters
464
+ ----------
465
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features), or \
466
+ (n_samples, n_samples) if metric='precomputed'
467
+ A feature array, or array of distances between samples if
468
+ metric='precomputed'.
469
+
470
+ min_samples : int > 1 or float between 0 and 1
471
+ The number of samples in a neighborhood for a point to be considered
472
+ as a core point. Expressed as an absolute number or a fraction of the
473
+ number of samples (rounded to be at least 2).
474
+
475
+ max_eps : float, default=np.inf
476
+ The maximum distance between two samples for one to be considered as
477
+ in the neighborhood of the other. Default value of ``np.inf`` will
478
+ identify clusters across all scales; reducing ``max_eps`` will result
479
+ in shorter run times.
480
+
481
+ metric : str or callable, default='minkowski'
482
+ Metric to use for distance computation. Any metric from scikit-learn
483
+ or scipy.spatial.distance can be used.
484
+
485
+ If metric is a callable function, it is called on each
486
+ pair of instances (rows) and the resulting value recorded. The callable
487
+ should take two arrays as input and return one value indicating the
488
+ distance between them. This works for Scipy's metrics, but is less
489
+ efficient than passing the metric name as a string. If metric is
490
+ "precomputed", X is assumed to be a distance matrix and must be square.
491
+
492
+ Valid values for metric are:
493
+
494
+ - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
495
+ 'manhattan']
496
+
497
+ - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
498
+ 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
499
+ 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
500
+ 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
501
+ 'yule']
502
+
503
+ See the documentation for scipy.spatial.distance for details on these
504
+ metrics.
505
+
506
+ .. note::
507
+ `'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11.
508
+
509
+ p : float, default=2
510
+ Parameter for the Minkowski metric from
511
+ :class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is
512
+ equivalent to using manhattan_distance (l1), and euclidean_distance
513
+ (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
514
+
515
+ metric_params : dict, default=None
516
+ Additional keyword arguments for the metric function.
517
+
518
+ algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
519
+ Algorithm used to compute the nearest neighbors:
520
+
521
+ - 'ball_tree' will use :class:`~sklearn.neighbors.BallTree`.
522
+ - 'kd_tree' will use :class:`~sklearn.neighbors.KDTree`.
523
+ - 'brute' will use a brute-force search.
524
+ - 'auto' will attempt to decide the most appropriate algorithm
525
+ based on the values passed to `fit` method. (default)
526
+
527
+ Note: fitting on sparse input will override the setting of
528
+ this parameter, using brute force.
529
+
530
+ leaf_size : int, default=30
531
+ Leaf size passed to :class:`~sklearn.neighbors.BallTree` or
532
+ :class:`~sklearn.neighbors.KDTree`. This can affect the speed of the
533
+ construction and query, as well as the memory required to store the
534
+ tree. The optimal value depends on the nature of the problem.
535
+
536
+ n_jobs : int, default=None
537
+ The number of parallel jobs to run for neighbors search.
538
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
539
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
540
+ for more details.
541
+
542
+ Returns
543
+ -------
544
+ ordering_ : array of shape (n_samples,)
545
+ The cluster ordered list of sample indices.
546
+
547
+ core_distances_ : array of shape (n_samples,)
548
+ Distance at which each sample becomes a core point, indexed by object
549
+ order. Points which will never be core have a distance of inf. Use
550
+ ``clust.core_distances_[clust.ordering_]`` to access in cluster order.
551
+
552
+ reachability_ : array of shape (n_samples,)
553
+ Reachability distances per sample, indexed by object order. Use
554
+ ``clust.reachability_[clust.ordering_]`` to access in cluster order.
555
+
556
+ predecessor_ : array of shape (n_samples,)
557
+ Point that a sample was reached from, indexed by object order.
558
+ Seed points have a predecessor of -1.
559
+
560
+ References
561
+ ----------
562
+ .. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
563
+ and Jörg Sander. "OPTICS: ordering points to identify the clustering
564
+ structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
565
+
566
+ Examples
567
+ --------
568
+ >>> import numpy as np
569
+ >>> from sklearn.cluster import compute_optics_graph
570
+ >>> X = np.array([[1, 2], [2, 5], [3, 6],
571
+ ... [8, 7], [8, 8], [7, 3]])
572
+ >>> ordering, core_distances, reachability, predecessor = compute_optics_graph(
573
+ ... X,
574
+ ... min_samples=2,
575
+ ... max_eps=np.inf,
576
+ ... metric="minkowski",
577
+ ... p=2,
578
+ ... metric_params=None,
579
+ ... algorithm="auto",
580
+ ... leaf_size=30,
581
+ ... n_jobs=None,
582
+ ... )
583
+ >>> ordering
584
+ array([0, 1, 2, 5, 3, 4])
585
+ >>> core_distances
586
+ array([3.16..., 1.41..., 1.41..., 1. , 1. ,
587
+ 4.12...])
588
+ >>> reachability
589
+ array([ inf, 3.16..., 1.41..., 4.12..., 1. ,
590
+ 5. ])
591
+ >>> predecessor
592
+ array([-1, 0, 1, 5, 3, 2])
593
+ """
594
+ n_samples = X.shape[0]
595
+ _validate_size(min_samples, n_samples, "min_samples")
596
+ if min_samples <= 1:
597
+ min_samples = max(2, int(min_samples * n_samples))
598
+
599
+ # Start all points as 'unprocessed' ##
600
+ reachability_ = np.empty(n_samples)
601
+ reachability_.fill(np.inf)
602
+ predecessor_ = np.empty(n_samples, dtype=int)
603
+ predecessor_.fill(-1)
604
+
605
+ nbrs = NearestNeighbors(
606
+ n_neighbors=min_samples,
607
+ algorithm=algorithm,
608
+ leaf_size=leaf_size,
609
+ metric=metric,
610
+ metric_params=metric_params,
611
+ p=p,
612
+ n_jobs=n_jobs,
613
+ )
614
+
615
+ nbrs.fit(X)
616
+ # Here we first do a kNN query for each point, this differs from
617
+ # the original OPTICS that only used epsilon range queries.
618
+ # TODO: handle working_memory somehow?
619
+ core_distances_ = _compute_core_distances_(
620
+ X=X, neighbors=nbrs, min_samples=min_samples, working_memory=None
621
+ )
622
+ # OPTICS puts an upper limit on these, use inf for undefined.
623
+ core_distances_[core_distances_ > max_eps] = np.inf
624
+ np.around(
625
+ core_distances_,
626
+ decimals=np.finfo(core_distances_.dtype).precision,
627
+ out=core_distances_,
628
+ )
629
+
630
+ # Main OPTICS loop. Not parallelizable. The order that entries are
631
+ # written to the 'ordering_' list is important!
632
+ # Note that this implementation is O(n^2) theoretically, but
633
+ # supposedly with very low constant factors.
634
+ processed = np.zeros(X.shape[0], dtype=bool)
635
+ ordering = np.zeros(X.shape[0], dtype=int)
636
+ for ordering_idx in range(X.shape[0]):
637
+ # Choose next based on smallest reachability distance
638
+ # (And prefer smaller ids on ties, possibly np.inf!)
639
+ index = np.where(processed == 0)[0]
640
+ point = index[np.argmin(reachability_[index])]
641
+
642
+ processed[point] = True
643
+ ordering[ordering_idx] = point
644
+ if core_distances_[point] != np.inf:
645
+ _set_reach_dist(
646
+ core_distances_=core_distances_,
647
+ reachability_=reachability_,
648
+ predecessor_=predecessor_,
649
+ point_index=point,
650
+ processed=processed,
651
+ X=X,
652
+ nbrs=nbrs,
653
+ metric=metric,
654
+ metric_params=metric_params,
655
+ p=p,
656
+ max_eps=max_eps,
657
+ )
658
+ if np.all(np.isinf(reachability_)):
659
+ warnings.warn(
660
+ (
661
+ "All reachability values are inf. Set a larger"
662
+ " max_eps or all data will be considered outliers."
663
+ ),
664
+ UserWarning,
665
+ )
666
+ return ordering, core_distances_, reachability_, predecessor_
667
+
668
+
669
+ def _set_reach_dist(
670
+ core_distances_,
671
+ reachability_,
672
+ predecessor_,
673
+ point_index,
674
+ processed,
675
+ X,
676
+ nbrs,
677
+ metric,
678
+ metric_params,
679
+ p,
680
+ max_eps,
681
+ ):
682
+ P = X[point_index : point_index + 1]
683
+ # Assume that radius_neighbors is faster without distances
684
+ # and we don't need all distances, nevertheless, this means
685
+ # we may be doing some work twice.
686
+ indices = nbrs.radius_neighbors(P, radius=max_eps, return_distance=False)[0]
687
+
688
+ # Getting indices of neighbors that have not been processed
689
+ unproc = np.compress(~np.take(processed, indices), indices)
690
+ # Neighbors of current point are already processed.
691
+ if not unproc.size:
692
+ return
693
+
694
+ # Only compute distances to unprocessed neighbors:
695
+ if metric == "precomputed":
696
+ dists = X[[point_index], unproc]
697
+ if isinstance(dists, np.matrix):
698
+ dists = np.asarray(dists)
699
+ dists = dists.ravel()
700
+ else:
701
+ _params = dict() if metric_params is None else metric_params.copy()
702
+ if metric == "minkowski" and "p" not in _params:
703
+ # the same logic as neighbors, p is ignored if explicitly set
704
+ # in the dict params
705
+ _params["p"] = p
706
+ dists = pairwise_distances(P, X[unproc], metric, n_jobs=None, **_params).ravel()
707
+
708
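# The OPTICS reachability distance of a neighbor o w.r.t. the current point p
# is max(core_distance(p), dist(p, o)); only strictly smaller values below
# update the stored reachability (and predecessor) of the still-unprocessed
# neighbors.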
+ rdists = np.maximum(dists, core_distances_[point_index])
709
+ np.around(rdists, decimals=np.finfo(rdists.dtype).precision, out=rdists)
710
+ improved = np.where(rdists < np.take(reachability_, unproc))
711
+ reachability_[unproc[improved]] = rdists[improved]
712
+ predecessor_[unproc[improved]] = point_index
713
+
714
+
715
+ @validate_params(
716
+ {
717
+ "reachability": [np.ndarray],
718
+ "core_distances": [np.ndarray],
719
+ "ordering": [np.ndarray],
720
+ "eps": [Interval(Real, 0, None, closed="both")],
721
+ },
722
+ prefer_skip_nested_validation=True,
723
+ )
724
+ def cluster_optics_dbscan(*, reachability, core_distances, ordering, eps):
725
+ """Perform DBSCAN extraction for an arbitrary epsilon.
726
+
727
+ Extracting the clusters runs in linear time. Note that this results in
728
+ ``labels_`` which are close to a :class:`~sklearn.cluster.DBSCAN` with
729
+ similar settings and ``eps``, only if ``eps`` is close to ``max_eps``.
730
+
731
+ Parameters
732
+ ----------
733
+ reachability : ndarray of shape (n_samples,)
734
+ Reachability distances calculated by OPTICS (``reachability_``).
735
+
736
+ core_distances : ndarray of shape (n_samples,)
737
+ Distances at which points become core (``core_distances_``).
738
+
739
+ ordering : ndarray of shape (n_samples,)
740
+ OPTICS ordered point indices (``ordering_``).
741
+
742
+ eps : float
743
+ DBSCAN ``eps`` parameter. Must be set to < ``max_eps``. Results
744
+ will be close to DBSCAN algorithm if ``eps`` and ``max_eps`` are close
745
+ to one another.
746
+
747
+ Returns
748
+ -------
749
+ labels_ : array of shape (n_samples,)
750
+ The estimated labels.
751
+
752
+ Examples
753
+ --------
754
+ >>> import numpy as np
755
+ >>> from sklearn.cluster import cluster_optics_dbscan, compute_optics_graph
756
+ >>> X = np.array([[1, 2], [2, 5], [3, 6],
757
+ ... [8, 7], [8, 8], [7, 3]])
758
+ >>> ordering, core_distances, reachability, predecessor = compute_optics_graph(
759
+ ... X,
760
+ ... min_samples=2,
761
+ ... max_eps=np.inf,
762
+ ... metric="minkowski",
763
+ ... p=2,
764
+ ... metric_params=None,
765
+ ... algorithm="auto",
766
+ ... leaf_size=30,
767
+ ... n_jobs=None,
768
+ ... )
769
+ >>> eps = 4.5
770
+ >>> labels = cluster_optics_dbscan(
771
+ ... reachability=reachability,
772
+ ... core_distances=core_distances,
773
+ ... ordering=ordering,
774
+ ... eps=eps,
775
+ ... )
776
+ >>> labels
777
+ array([0, 0, 0, 1, 1, 1])
778
+ """
779
+ n_samples = len(core_distances)
780
+ labels = np.zeros(n_samples, dtype=int)
781
+
782
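# In the OPTICS ordering, a sample with reachability above eps but core
# distance at most eps starts a new cluster, so the cumulative sum over that
# boolean mask assigns one incrementing label per such sample; samples that
# are neither reachable nor core at eps are then marked as noise (-1).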
+ far_reach = reachability > eps
783
+ near_core = core_distances <= eps
784
+ labels[ordering] = np.cumsum(far_reach[ordering] & near_core[ordering]) - 1
785
+ labels[far_reach & ~near_core] = -1
786
+ return labels
787
+
788
+
789
+ @validate_params(
790
+ {
791
+ "reachability": [np.ndarray],
792
+ "predecessor": [np.ndarray],
793
+ "ordering": [np.ndarray],
794
+ "min_samples": [
795
+ Interval(Integral, 2, None, closed="left"),
796
+ Interval(RealNotInt, 0, 1, closed="both"),
797
+ ],
798
+ "min_cluster_size": [
799
+ Interval(Integral, 2, None, closed="left"),
800
+ Interval(RealNotInt, 0, 1, closed="both"),
801
+ None,
802
+ ],
803
+ "xi": [Interval(Real, 0, 1, closed="both")],
804
+ "predecessor_correction": ["boolean"],
805
+ },
806
+ prefer_skip_nested_validation=True,
807
+ )
808
+ def cluster_optics_xi(
809
+ *,
810
+ reachability,
811
+ predecessor,
812
+ ordering,
813
+ min_samples,
814
+ min_cluster_size=None,
815
+ xi=0.05,
816
+ predecessor_correction=True,
817
+ ):
818
+ """Automatically extract clusters according to the Xi-steep method.
819
+
820
+ Parameters
821
+ ----------
822
+ reachability : ndarray of shape (n_samples,)
823
+ Reachability distances calculated by OPTICS (`reachability_`).
824
+
825
+ predecessor : ndarray of shape (n_samples,)
826
+ Predecessors calculated by OPTICS.
827
+
828
+ ordering : ndarray of shape (n_samples,)
829
+ OPTICS ordered point indices (`ordering_`).
830
+
831
+ min_samples : int > 1 or float between 0 and 1
832
+ The same as the min_samples given to OPTICS. Up and down steep regions
833
+ can't have more than ``min_samples`` consecutive non-steep points.
834
+ Expressed as an absolute number or a fraction of the number of samples
835
+ (rounded to be at least 2).
836
+
837
+ min_cluster_size : int > 1 or float between 0 and 1, default=None
838
+ Minimum number of samples in an OPTICS cluster, expressed as an
839
+ absolute number or a fraction of the number of samples (rounded to be
840
+ at least 2). If ``None``, the value of ``min_samples`` is used instead.
841
+
842
+ xi : float between 0 and 1, default=0.05
843
+ Determines the minimum steepness on the reachability plot that
844
+ constitutes a cluster boundary. For example, an upwards point in the
845
+ reachability plot is defined by the ratio from one point to its
846
+ successor being at most 1-xi.
847
+
848
+ predecessor_correction : bool, default=True
849
+ Correct clusters based on the calculated predecessors.
850
+
851
+ Returns
852
+ -------
853
+ labels : ndarray of shape (n_samples,)
854
+ The labels assigned to samples. Points which are not included
855
+ in any cluster are labeled as -1.
856
+
857
+ clusters : ndarray of shape (n_clusters, 2)
858
+ The list of clusters in the form of ``[start, end]`` in each row, with
859
+ all indices inclusive. The clusters are ordered according to ``(end,
860
+ -start)`` (ascending) so that larger clusters encompassing smaller
861
+ clusters come after such nested smaller clusters. Since ``labels`` does
862
+ not reflect the hierarchy, usually ``len(clusters) >
863
+ np.unique(labels)``.
864
+
865
+ Examples
866
+ --------
867
+ >>> import numpy as np
868
+ >>> from sklearn.cluster import cluster_optics_xi, compute_optics_graph
869
+ >>> X = np.array([[1, 2], [2, 5], [3, 6],
870
+ ... [8, 7], [8, 8], [7, 3]])
871
+ >>> ordering, core_distances, reachability, predecessor = compute_optics_graph(
872
+ ... X,
873
+ ... min_samples=2,
874
+ ... max_eps=np.inf,
875
+ ... metric="minkowski",
876
+ ... p=2,
877
+ ... metric_params=None,
878
+ ... algorithm="auto",
879
+ ... leaf_size=30,
880
+ ... n_jobs=None
881
+ ... )
882
+ >>> min_samples = 2
883
+ >>> labels, clusters = cluster_optics_xi(
884
+ ... reachability=reachability,
885
+ ... predecessor=predecessor,
886
+ ... ordering=ordering,
887
+ ... min_samples=min_samples,
888
+ ... )
889
+ >>> labels
890
+ array([0, 0, 0, 1, 1, 1])
891
+ >>> clusters
892
+ array([[0, 2],
893
+ [3, 5],
894
+ [0, 5]])
895
+ """
896
+ n_samples = len(reachability)
897
+ _validate_size(min_samples, n_samples, "min_samples")
898
+ if min_samples <= 1:
899
+ min_samples = max(2, int(min_samples * n_samples))
900
+ if min_cluster_size is None:
901
+ min_cluster_size = min_samples
902
+ _validate_size(min_cluster_size, n_samples, "min_cluster_size")
903
+ if min_cluster_size <= 1:
904
+ min_cluster_size = max(2, int(min_cluster_size * n_samples))
905
+
906
+ clusters = _xi_cluster(
907
+ reachability[ordering],
908
+ predecessor[ordering],
909
+ ordering,
910
+ xi,
911
+ min_samples,
912
+ min_cluster_size,
913
+ predecessor_correction,
914
+ )
915
+ labels = _extract_xi_labels(ordering, clusters)
916
+ return labels, clusters
917
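# Illustrative sketch (not part of the module): each row of ``clusters``
# indexes into the OPTICS ordering, so the original sample indices of a
# (possibly nested) cluster can be recovered from the doctest variables
# above as:
#
#   start, end = clusters[0]
#   members = ordering[start:end + 1]  # original indices of that cluster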
+
918
+
919
+ def _extend_region(steep_point, xward_point, start, min_samples):
920
+ """Extend the area until it's maximal.
921
+
922
+ It's the same function for both upward and downward regions, depending on
923
+ the given input parameters. Assuming:
924
+
925
+ - steep_{upward/downward}: bool array indicating whether a point is a
926
+ steep {upward/downward};
927
+ - upward/downward: bool array indicating whether a point is
928
+ upward/downward;
929
+
930
+ To extend an upward region, ``steep_point=steep_upward`` and
931
+ ``xward_point=downward`` are expected, and to extend a downward region,
932
+ ``steep_point=steep_downward`` and ``xward_point=upward``.
933
+
934
+ Parameters
935
+ ----------
936
+ steep_point : ndarray of shape (n_samples,), dtype=bool
937
+ True if the point is steep downward (upward).
938
+
939
+ xward_point : ndarray of shape (n_samples,), dtype=bool
940
+ True if the point is an upward (respectively downward) point.
941
+
942
+ start : int
943
+ The start of the xward region.
944
+
945
+ min_samples : int
946
+ The same as the min_samples given to OPTICS. Up and down steep
947
+ regions can't have more than ``min_samples`` consecutive non-steep
948
+ points.
949
+
950
+ Returns
951
+ -------
952
+ index : int
953
+ The current index iterating over all the samples, i.e. where we are up
954
+ to in our search.
955
+
956
+ end : int
957
+ The end of the region, which can be behind the index. The region
958
+ includes the ``end`` index.
959
+ """
960
+ n_samples = len(steep_point)
961
+ non_xward_points = 0
962
+ index = start
963
+ end = start
964
+ # find a maximal area
965
+ while index < n_samples:
966
+ if steep_point[index]:
967
+ non_xward_points = 0
968
+ end = index
969
+ elif not xward_point[index]:
970
+ # it's not a steep point, but still goes up.
971
+ non_xward_points += 1
972
+ # region should include no more than min_samples consecutive
973
+ # non steep xward points.
974
+ if non_xward_points > min_samples:
975
+ break
976
+ else:
977
+ return end
978
+ index += 1
979
+ return end
980
+
981
+
982
+ def _update_filter_sdas(sdas, mib, xi_complement, reachability_plot):
983
+ """Update steep down areas (SDAs) using the new maximum in between (mib)
984
+ value, and the given complement of xi, i.e. ``1 - xi``.
985
+ """
986
+ if np.isinf(mib):
987
+ return []
988
+ res = [
989
+ sda for sda in sdas if mib <= reachability_plot[sda["start"]] * xi_complement
990
+ ]
991
+ for sda in res:
992
+ sda["mib"] = max(sda["mib"], mib)
993
+ return res
994
+
995
+
996
+ def _correct_predecessor(reachability_plot, predecessor_plot, ordering, s, e):
997
+ """Correct for predecessors.
998
+
999
+ Applies Algorithm 2 of [1]_.
1000
+
1001
+ Input parameters are ordered by the computed OPTICS ordering.
1002
+
1003
+ .. [1] Schubert, Erich, Michael Gertz.
1004
+ "Improving the Cluster Structure Extracted from OPTICS Plots." Proc. of
1005
+ the Conference "Lernen, Wissen, Daten, Analysen" (LWDA) (2018): 318-329.
1006
+ """
1007
+ while s < e:
1008
+ if reachability_plot[s] > reachability_plot[e]:
1009
+ return s, e
1010
+ p_e = predecessor_plot[e]
1011
+ for i in range(s, e):
1012
+ if p_e == ordering[i]:
1013
+ return s, e
1014
+ e -= 1
1015
+ return None, None
1016
+
1017
+
1018
+ def _xi_cluster(
1019
+ reachability_plot,
1020
+ predecessor_plot,
1021
+ ordering,
1022
+ xi,
1023
+ min_samples,
1024
+ min_cluster_size,
1025
+ predecessor_correction,
1026
+ ):
1027
+ """Automatically extract clusters according to the Xi-steep method.
1028
+
1029
+ This is roughly an implementation of Figure 19 of the OPTICS paper.
1030
+
1031
+ Parameters
1032
+ ----------
1033
+ reachability_plot : array-like of shape (n_samples,)
1034
+ The reachability plot, i.e. reachability ordered according to
1035
+ the calculated ordering, all computed by OPTICS.
1036
+
1037
+ predecessor_plot : array-like of shape (n_samples,)
1038
+ Predecessors ordered according to the calculated ordering.
1039
+
1040
+ xi : float, between 0 and 1
1041
+ Determines the minimum steepness on the reachability plot that
1042
+ constitutes a cluster boundary. For example, an upwards point in the
1043
+ reachability plot is defined by the ratio from one point to its
1044
+ successor being at most 1-xi.
1045
+
1046
+ min_samples : int > 1
1047
+ The same as the min_samples given to OPTICS. Up and down steep regions
1048
+ can't have more than ``min_samples`` consecutive non-steep points.
1049
+
1050
+ min_cluster_size : int > 1
1051
+ Minimum number of samples in an OPTICS cluster.
1052
+
1053
+ predecessor_correction : bool
1054
+ Correct clusters based on the calculated predecessors.
1055
+
1056
+ Returns
1057
+ -------
1058
+ clusters : ndarray of shape (n_clusters, 2)
1059
+ The list of clusters in the form of [start, end] in each row, with all
1060
+ indices inclusive. The clusters are ordered in a way that larger
1061
+ clusters encompassing smaller clusters come after those smaller
1062
+ clusters.
1063
+ """
1064
+
1065
+ # Our implementation adds an inf to the end of reachability plot
1066
+ # this helps to find potential clusters at the end of the
1067
+ # reachability plot even if there's no upward region at the end of it.
1068
+ reachability_plot = np.hstack((reachability_plot, np.inf))
1069
+
1070
+ xi_complement = 1 - xi
1071
+ sdas = [] # steep down areas, introduced in section 4.3.2 of the paper
1072
+ clusters = []
1073
+ index = 0
1074
+ mib = 0.0 # maximum in between, section 4.3.2
1075
+
1076
+ # Our implementation corrects a mistake in the original
1077
+ # paper, i.e., in Definition 9 steep downward point,
1078
+ # r(p) * (1 - xi) <= r(p + 1) should be
1079
+ # r(p) * (1 - xi) >= r(p + 1)
1080
+ with np.errstate(invalid="ignore"):
1081
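# ratio[i] = r(p_i) / r(p_{i+1}); a point is steep upward when
# r(p_i) <= (1 - xi) * r(p_{i+1}), i.e. ratio <= 1 - xi, and steep downward
# when r(p_i) * (1 - xi) >= r(p_{i+1}), i.e. ratio >= 1 / (1 - xi).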
+ ratio = reachability_plot[:-1] / reachability_plot[1:]
1082
+ steep_upward = ratio <= xi_complement
1083
+ steep_downward = ratio >= 1 / xi_complement
1084
+ downward = ratio > 1
1085
+ upward = ratio < 1
1086
+
1087
+ # the following loop is almost exactly as Figure 19 of the paper.
1088
+ # it jumps over the areas which are not either steep down or up areas
1089
+ for steep_index in iter(np.flatnonzero(steep_upward | steep_downward)):
1090
+ # just continue if steep_index has been a part of a discovered xward
1091
+ # area.
1092
+ if steep_index < index:
1093
+ continue
1094
+
1095
+ mib = max(mib, np.max(reachability_plot[index : steep_index + 1]))
1096
+
1097
+ # steep downward areas
1098
+ if steep_downward[steep_index]:
1099
+ sdas = _update_filter_sdas(sdas, mib, xi_complement, reachability_plot)
1100
+ D_start = steep_index
1101
+ D_end = _extend_region(steep_downward, upward, D_start, min_samples)
1102
+ D = {"start": D_start, "end": D_end, "mib": 0.0}
1103
+ sdas.append(D)
1104
+ index = D_end + 1
1105
+ mib = reachability_plot[index]
1106
+
1107
+ # steep upward areas
1108
+ else:
1109
+ sdas = _update_filter_sdas(sdas, mib, xi_complement, reachability_plot)
1110
+ U_start = steep_index
1111
+ U_end = _extend_region(steep_upward, downward, U_start, min_samples)
1112
+ index = U_end + 1
1113
+ mib = reachability_plot[index]
1114
+
1115
+ U_clusters = []
1116
+ for D in sdas:
1117
+ c_start = D["start"]
1118
+ c_end = U_end
1119
+
1120
+ # line (**), sc2*
1121
+ if reachability_plot[c_end + 1] * xi_complement < D["mib"]:
1122
+ continue
1123
+
1124
+ # Definition 11: criterion 4
1125
+ D_max = reachability_plot[D["start"]]
1126
+ if D_max * xi_complement >= reachability_plot[c_end + 1]:
1127
+ # Find the first index from the left side which is almost
1128
+ # at the same level as the end of the detected cluster.
1129
+ while (
1130
+ reachability_plot[c_start + 1] > reachability_plot[c_end + 1]
1131
+ and c_start < D["end"]
1132
+ ):
1133
+ c_start += 1
1134
+ elif reachability_plot[c_end + 1] * xi_complement >= D_max:
1135
+ # Find the first index from the right side which is almost
1136
+ # at the same level as the beginning of the detected
1137
+ # cluster.
1138
+ # Our implementation corrects a mistake in the original
1139
+ # paper, i.e., in Definition 11 4c, r(x) < r(sD) should be
1140
+ # r(x) > r(sD).
1141
+ while reachability_plot[c_end - 1] > D_max and c_end > U_start:
1142
+ c_end -= 1
1143
+
1144
+ # predecessor correction
1145
+ if predecessor_correction:
1146
+ c_start, c_end = _correct_predecessor(
1147
+ reachability_plot, predecessor_plot, ordering, c_start, c_end
1148
+ )
1149
+ if c_start is None:
1150
+ continue
1151
+
1152
+ # Definition 11: criterion 3.a
1153
+ if c_end - c_start + 1 < min_cluster_size:
1154
+ continue
1155
+
1156
+ # Definition 11: criterion 1
1157
+ if c_start > D["end"]:
1158
+ continue
1159
+
1160
+ # Definition 11: criterion 2
1161
+ if c_end < U_start:
1162
+ continue
1163
+
1164
+ U_clusters.append((c_start, c_end))
1165
+
1166
+ # add smaller clusters first.
1167
+ U_clusters.reverse()
1168
+ clusters.extend(U_clusters)
1169
+
1170
+ return np.array(clusters)
1171
+
1172
+
1173
+ def _extract_xi_labels(ordering, clusters):
1174
+ """Extracts the labels from the clusters returned by `_xi_cluster`.
1175
+ We rely on the fact that clusters are stored
1176
+ with the smaller clusters coming before the larger ones.
1177
+
1178
+ Parameters
1179
+ ----------
1180
+ ordering : array-like of shape (n_samples,)
1181
+ The ordering of points calculated by OPTICS
1182
+
1183
+ clusters : array-like of shape (n_clusters, 2)
1184
+ List of clusters i.e. (start, end) tuples,
1185
+ as returned by `_xi_cluster`.
1186
+
1187
+ Returns
1188
+ -------
1189
+ labels : ndarray of shape (n_samples,)
1190
+ """
1191
+
1192
+ labels = np.full(len(ordering), -1, dtype=int)
1193
+ label = 0
1194
+ for c in clusters:
1195
+ if not np.any(labels[c[0] : (c[1] + 1)] != -1):
1196
+ labels[c[0] : (c[1] + 1)] = label
1197
+ label += 1
1198
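# Up to this point ``labels`` is indexed in the OPTICS ordering; scattering
# it through ``ordering`` maps each label back to its original sample index.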
+ labels[ordering] = labels.copy()
1199
+ return labels
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_spectral.py ADDED
@@ -0,0 +1,799 @@
1
+ """Algorithms for spectral clustering"""
2
+
3
+ # Author: Gael Varoquaux <[email protected]>
4
+ # Brian Cheung
5
+ # Wei LI <[email protected]>
6
+ # Andrew Knyazev <[email protected]>
7
+ # License: BSD 3 clause
8
+
9
+ import warnings
10
+ from numbers import Integral, Real
11
+
12
+ import numpy as np
13
+ from scipy.linalg import LinAlgError, qr, svd
14
+ from scipy.sparse import csc_matrix
15
+
16
+ from ..base import BaseEstimator, ClusterMixin, _fit_context
17
+ from ..manifold import spectral_embedding
18
+ from ..metrics.pairwise import KERNEL_PARAMS, pairwise_kernels
19
+ from ..neighbors import NearestNeighbors, kneighbors_graph
20
+ from ..utils import as_float_array, check_random_state
21
+ from ..utils._param_validation import Interval, StrOptions, validate_params
22
+ from ._kmeans import k_means
23
+
24
+
25
+ def cluster_qr(vectors):
26
+ """Find the discrete partition closest to the eigenvector embedding.
27
+
28
+ This implementation was proposed in [1]_.
29
+
30
+ .. versionadded:: 1.1
31
+
32
+ Parameters
33
+ ----------
34
+ vectors : array-like, shape: (n_samples, n_clusters)
35
+ The embedding space of the samples.
36
+
37
+ Returns
38
+ -------
39
+ labels : array of integers, shape: n_samples
40
+ The cluster labels of vectors.
41
+
42
+ References
43
+ ----------
44
+ .. [1] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019
45
+ Anil Damle, Victor Minden, Lexing Ying
46
+ <10.1093/imaiai/iay008>`
47
+
48
+ """
49
+
50
+ k = vectors.shape[1]
51
+ _, _, piv = qr(vectors.T, pivoting=True)
52
+ ut, _, v = svd(vectors[piv[:k], :].T)
53
+ vectors = abs(np.dot(vectors, np.dot(ut, v.conj())))
54
+ return vectors.argmax(axis=1)
55
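# Illustrative sketch (not part of the module): cluster_qr applied to a toy
# two-column embedding, where the rows are assumed to already form a
# spectral embedding of the samples.
#
#   import numpy as np
#   emb = np.array([[1.0, 0.0], [0.9, 0.1], [0.0, 1.0], [0.1, 0.9]])
#   cluster_qr(emb)  # groups the first two and last two rows (labels may be permuted)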
+
56
+
57
+ def discretize(
58
+ vectors, *, copy=True, max_svd_restarts=30, n_iter_max=20, random_state=None
59
+ ):
60
+ """Search for a partition matrix which is closest to the eigenvector embedding.
61
+
62
+ This implementation was proposed in [1]_.
63
+
64
+ Parameters
65
+ ----------
66
+ vectors : array-like of shape (n_samples, n_clusters)
67
+ The embedding space of the samples.
68
+
69
+ copy : bool, default=True
70
+ Whether to copy vectors, or perform in-place normalization.
71
+
72
+ max_svd_restarts : int, default=30
73
+ Maximum number of attempts to restart SVD if convergence fails
74
+
75
+ n_iter_max : int, default=20
76
+ Maximum number of iterations to attempt in rotation and partition
77
+ matrix search if machine precision convergence is not reached
78
+
79
+ random_state : int, RandomState instance, default=None
80
+ Determines random number generation for rotation matrix initialization.
81
+ Use an int to make the randomness deterministic.
82
+ See :term:`Glossary <random_state>`.
83
+
84
+ Returns
85
+ -------
86
+ labels : array of integers, shape: n_samples
87
+ The labels of the clusters.
88
+
89
+ References
90
+ ----------
91
+
92
+ .. [1] `Multiclass spectral clustering, 2003
93
+ Stella X. Yu, Jianbo Shi
94
+ <https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
95
+
96
+ Notes
97
+ -----
98
+
99
+ The eigenvector embedding is used to iteratively search for the
100
+ closest discrete partition. First, the eigenvector embedding is
101
+ normalized to the space of partition matrices. An optimal discrete
102
+ partition matrix closest to this normalized embedding multiplied by
103
+ an initial rotation is calculated. Fixing this discrete partition
104
+ matrix, an optimal rotation matrix is calculated. These two
105
+ calculations are performed until convergence. The discrete partition
106
+ matrix is returned as the clustering solution. Used in spectral
107
+ clustering, this method tends to be faster and more robust to random
108
+ initialization than k-means.
109
+
110
+ """
111
+
112
+ random_state = check_random_state(random_state)
113
+
114
+ vectors = as_float_array(vectors, copy=copy)
115
+
116
+ eps = np.finfo(float).eps
117
+ n_samples, n_components = vectors.shape
118
+
119
+ # Normalize the eigenvectors to an equal length of a vector of ones.
120
+ # Reorient the eigenvectors to point in the negative direction with respect
121
+ # to the first element. This may have to do with constraining the
122
+ # eigenvectors to lie in a specific quadrant to make the discretization
123
+ # search easier.
124
+ norm_ones = np.sqrt(n_samples)
125
+ for i in range(vectors.shape[1]):
126
+ vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) * norm_ones
127
+ if vectors[0, i] != 0:
128
+ vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
129
+
130
+ # Normalize the rows of the eigenvectors. Samples should lie on the unit
131
+ # hypersphere centered at the origin. This transforms the samples in the
132
+ # embedding space to the space of partition matrices.
133
+ vectors = vectors / np.sqrt((vectors**2).sum(axis=1))[:, np.newaxis]
134
+
135
+ svd_restarts = 0
136
+ has_converged = False
137
+
138
+ # If there is an exception we try to randomize and rerun SVD again
139
+ # do this max_svd_restarts times.
140
+ while (svd_restarts < max_svd_restarts) and not has_converged:
141
+ # Initialize first column of rotation matrix with a row of the
142
+ # eigenvectors
143
+ rotation = np.zeros((n_components, n_components))
144
+ rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
145
+
146
+ # To initialize the rest of the rotation matrix, find the rows
147
+ # of the eigenvectors that are as orthogonal to each other as
148
+ # possible
149
+ c = np.zeros(n_samples)
150
+ for j in range(1, n_components):
151
+ # Accumulate c to ensure row is as orthogonal as possible to
152
+ # previous picks as well as current one
153
+ c += np.abs(np.dot(vectors, rotation[:, j - 1]))
154
+ rotation[:, j] = vectors[c.argmin(), :].T
155
+
156
+ last_objective_value = 0.0
157
+ n_iter = 0
158
+
159
+ while not has_converged:
160
+ n_iter += 1
161
+
162
+ t_discrete = np.dot(vectors, rotation)
163
+
164
+ labels = t_discrete.argmax(axis=1)
165
+ vectors_discrete = csc_matrix(
166
+ (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
167
+ shape=(n_samples, n_components),
168
+ )
169
+
170
+ t_svd = vectors_discrete.T * vectors
171
+
172
+ try:
173
+ U, S, Vh = np.linalg.svd(t_svd)
174
+ except LinAlgError:
175
+ svd_restarts += 1
176
+ print("SVD did not converge, randomizing and trying again")
177
+ break
178
+
179
+ ncut_value = 2.0 * (n_samples - S.sum())
180
+ if (abs(ncut_value - last_objective_value) < eps) or (n_iter > n_iter_max):
181
+ has_converged = True
182
+ else:
183
+ # otherwise calculate rotation and continue
184
+ last_objective_value = ncut_value
185
+ rotation = np.dot(Vh.T, U.T)
186
+
187
+ if not has_converged:
188
+ raise LinAlgError("SVD did not converge")
189
+ return labels
190
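# Illustrative sketch (not part of the module): discretize on the same kind
# of toy embedding; random_state only seeds the rotation initialization.
#
#   import numpy as np
#   emb = np.array([[1.0, 0.0], [0.9, 0.1], [0.0, 1.0], [0.1, 0.9]])
#   discretize(emb, random_state=0)  # recovers the two groups (labels up to permutation)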
+
191
+
192
+ @validate_params(
193
+ {"affinity": ["array-like", "sparse matrix"]},
194
+ prefer_skip_nested_validation=False,
195
+ )
196
+ def spectral_clustering(
197
+ affinity,
198
+ *,
199
+ n_clusters=8,
200
+ n_components=None,
201
+ eigen_solver=None,
202
+ random_state=None,
203
+ n_init=10,
204
+ eigen_tol="auto",
205
+ assign_labels="kmeans",
206
+ verbose=False,
207
+ ):
208
+ """Apply clustering to a projection of the normalized Laplacian.
209
+
210
+ In practice Spectral Clustering is very useful when the structure of
211
+ the individual clusters is highly non-convex or more generally when
212
+ a measure of the center and spread of the cluster is not a suitable
213
+ description of the complete cluster. For instance, when clusters are
214
+ nested circles on the 2D plane.
215
+
216
+ If affinity is the adjacency matrix of a graph, this method can be
217
+ used to find normalized graph cuts [1]_, [2]_.
218
+
219
+ Read more in the :ref:`User Guide <spectral_clustering>`.
220
+
221
+ Parameters
222
+ ----------
223
+ affinity : {array-like, sparse matrix} of shape (n_samples, n_samples)
224
+ The affinity matrix describing the relationship of the samples to
225
+ embed. **Must be symmetric**.
226
+
227
+ Possible examples:
228
+ - adjacency matrix of a graph,
229
+ - heat kernel of the pairwise distance matrix of the samples,
230
+ - symmetric k-nearest neighbours connectivity matrix of the samples.
231
+
232
+ n_clusters : int, default=8
233
+ Number of clusters to extract.
234
+
235
+ n_components : int, default=n_clusters
236
+ Number of eigenvectors to use for the spectral embedding.
237
+
238
+ eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
239
+ The eigenvalue decomposition method. If None then ``'arpack'`` is used.
240
+ See [4]_ for more details regarding ``'lobpcg'``.
241
+ Eigensolver ``'amg'`` runs ``'lobpcg'`` with optional
242
+ Algebraic MultiGrid preconditioning and requires pyamg to be installed.
243
+ It can be faster on very large sparse problems [6]_ and [7]_.
244
+
245
+ random_state : int, RandomState instance, default=None
246
+ A pseudo random number generator used for the initialization
247
+ of the lobpcg eigenvectors decomposition when `eigen_solver ==
248
+ 'amg'`, and for the K-Means initialization. Use an int to make
249
+ the results deterministic across calls (See
250
+ :term:`Glossary <random_state>`).
251
+
252
+ .. note::
253
+ When using `eigen_solver == 'amg'`,
254
+ it is necessary to also fix the global numpy seed with
255
+ `np.random.seed(int)` to get deterministic results. See
256
+ https://github.com/pyamg/pyamg/issues/139 for further
257
+ information.
258
+
259
+ n_init : int, default=10
260
+ Number of times the k-means algorithm will be run with different
261
+ centroid seeds. The final results will be the best output of n_init
262
+ consecutive runs in terms of inertia. Only used if
263
+ ``assign_labels='kmeans'``.
264
+
265
+ eigen_tol : float, default="auto"
266
+ Stopping criterion for eigendecomposition of the Laplacian matrix.
267
+ If `eigen_tol="auto"` then the passed tolerance will depend on the
268
+ `eigen_solver`:
269
+
270
+ - If `eigen_solver="arpack"`, then `eigen_tol=0.0`;
271
+ - If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then
272
+ `eigen_tol=None` which configures the underlying `lobpcg` solver to
273
+ automatically resolve the value according to their heuristics. See,
274
+ :func:`scipy.sparse.linalg.lobpcg` for details.
275
+
276
+ Note that when using `eigen_solver="lobpcg"` or `eigen_solver="amg"`
277
+ values of `tol<1e-5` may lead to convergence issues and should be
278
+ avoided.
279
+
280
+ .. versionadded:: 1.2
281
+ Added 'auto' option.
282
+
283
+ assign_labels : {'kmeans', 'discretize', 'cluster_qr'}, default='kmeans'
284
+ The strategy to use to assign labels in the embedding
285
+ space. There are three ways to assign labels after the Laplacian
286
+ embedding. k-means can be applied and is a popular choice. But it can
287
+ also be sensitive to initialization. Discretization is another
288
+ approach which is less sensitive to random initialization [3]_.
289
+ The cluster_qr method [5]_ directly extracts clusters from eigenvectors
290
+ in spectral clustering. In contrast to k-means and discretization, cluster_qr
291
+ has no tuning parameters and is not an iterative method, yet may outperform
292
+ k-means and discretization in terms of both quality and speed.
293
+
294
+ .. versionchanged:: 1.1
295
+ Added new labeling method 'cluster_qr'.
296
+
297
+ verbose : bool, default=False
298
+ Verbosity mode.
299
+
300
+ .. versionadded:: 0.24
301
+
302
+ Returns
303
+ -------
304
+ labels : array of integers, shape: n_samples
305
+ The labels of the clusters.
306
+
307
+ Notes
308
+ -----
309
+ The graph should contain only one connected component; otherwise
310
+ the results make little sense.
311
+
312
+ This algorithm solves the normalized cut for `k=2`: it is a
313
+ normalized spectral clustering.
314
+
315
+ References
316
+ ----------
317
+
318
+ .. [1] :doi:`Normalized cuts and image segmentation, 2000
319
+ Jianbo Shi, Jitendra Malik
320
+ <10.1109/34.868688>`
321
+
322
+ .. [2] :doi:`A Tutorial on Spectral Clustering, 2007
323
+ Ulrike von Luxburg
324
+ <10.1007/s11222-007-9033-z>`
325
+
326
+ .. [3] `Multiclass spectral clustering, 2003
327
+ Stella X. Yu, Jianbo Shi
328
+ <https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
329
+
330
+ .. [4] :doi:`Toward the Optimal Preconditioned Eigensolver:
331
+ Locally Optimal Block Preconditioned Conjugate Gradient Method, 2001
332
+ A. V. Knyazev
333
+ SIAM Journal on Scientific Computing 23, no. 2, pp. 517-541.
334
+ <10.1137/S1064827500366124>`
335
+
336
+ .. [5] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019
337
+ Anil Damle, Victor Minden, Lexing Ying
338
+ <10.1093/imaiai/iay008>`
339
+
340
+ .. [6] :doi:`Multiscale Spectral Image Segmentation Multiscale preconditioning
341
+ for computing eigenvalues of graph Laplacians in image segmentation, 2006
342
+ Andrew Knyazev
343
+ <10.13140/RG.2.2.35280.02565>`
344
+
345
+ .. [7] :doi:`Preconditioned spectral clustering for stochastic block partition
346
+ streaming graph challenge (Preliminary version at arXiv.)
347
+ David Zhuzhunashvili, Andrew Knyazev
348
+ <10.1109/HPEC.2017.8091045>`
349
+
350
+ Examples
351
+ --------
352
+ >>> import numpy as np
353
+ >>> from sklearn.metrics.pairwise import pairwise_kernels
354
+ >>> from sklearn.cluster import spectral_clustering
355
+ >>> X = np.array([[1, 1], [2, 1], [1, 0],
356
+ ... [4, 7], [3, 5], [3, 6]])
357
+ >>> affinity = pairwise_kernels(X, metric='rbf')
358
+ >>> spectral_clustering(
359
+ ... affinity=affinity, n_clusters=2, assign_labels="discretize", random_state=0
360
+ ... )
361
+ array([1, 1, 1, 0, 0, 0])
362
+ """
363
+
364
+ clusterer = SpectralClustering(
365
+ n_clusters=n_clusters,
366
+ n_components=n_components,
367
+ eigen_solver=eigen_solver,
368
+ random_state=random_state,
369
+ n_init=n_init,
370
+ affinity="precomputed",
371
+ eigen_tol=eigen_tol,
372
+ assign_labels=assign_labels,
373
+ verbose=verbose,
374
+ ).fit(affinity)
375
+
376
+ return clusterer.labels_
377
+
378
+
379
+ class SpectralClustering(ClusterMixin, BaseEstimator):
380
+ """Apply clustering to a projection of the normalized Laplacian.
381
+
382
+ In practice Spectral Clustering is very useful when the structure of
383
+ the individual clusters is highly non-convex, or more generally when
384
+ a measure of the center and spread of the cluster is not a suitable
385
+ description of the complete cluster, such as when clusters are
386
+ nested circles on the 2D plane.
387
+
388
+ If the affinity matrix is the adjacency matrix of a graph, this method
389
+ can be used to find normalized graph cuts [1]_, [2]_.
390
+
391
+ When calling ``fit``, an affinity matrix is constructed using either
392
+ a kernel function such as the Gaussian (aka RBF) kernel with Euclidean
393
+ distance ``d(X, X)``::
394
+
395
+ np.exp(-gamma * d(X,X) ** 2)
396
+
397
+ or a k-nearest neighbors connectivity matrix.
398
+
399
+ Alternatively, a user-provided affinity matrix can be specified by
400
+ setting ``affinity='precomputed'``.
401
+
402
+ Read more in the :ref:`User Guide <spectral_clustering>`.
403
+
404
+ Parameters
405
+ ----------
406
+ n_clusters : int, default=8
407
+ The dimension of the projection subspace.
408
+
409
+ eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
410
+ The eigenvalue decomposition strategy to use. AMG requires pyamg
411
+ to be installed. It can be faster on very large, sparse problems,
412
+ but may also lead to instabilities. If None, then ``'arpack'`` is
413
+ used. See [4]_ for more details regarding `'lobpcg'`.
414
+
415
+ n_components : int, default=None
416
+ Number of eigenvectors to use for the spectral embedding. If None,
417
+ defaults to `n_clusters`.
418
+
419
+ random_state : int, RandomState instance, default=None
420
+ A pseudo random number generator used for the initialization
421
+ of the lobpcg eigenvectors decomposition when `eigen_solver ==
422
+ 'amg'`, and for the K-Means initialization. Use an int to make
423
+ the results deterministic across calls (See
424
+ :term:`Glossary <random_state>`).
425
+
426
+ .. note::
427
+ When using `eigen_solver == 'amg'`,
428
+ it is necessary to also fix the global numpy seed with
429
+ `np.random.seed(int)` to get deterministic results. See
430
+ https://github.com/pyamg/pyamg/issues/139 for further
431
+ information.
432
+
433
+ n_init : int, default=10
434
+ Number of times the k-means algorithm will be run with different
435
+ centroid seeds. The final results will be the best output of n_init
436
+ consecutive runs in terms of inertia. Only used if
437
+ ``assign_labels='kmeans'``.
438
+
439
+ gamma : float, default=1.0
440
+ Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels.
441
+ Ignored for ``affinity='nearest_neighbors'``.
442
+
443
+ affinity : str or callable, default='rbf'
444
+ How to construct the affinity matrix.
445
+ - 'nearest_neighbors': construct the affinity matrix by computing a
446
+ graph of nearest neighbors.
447
+ - 'rbf': construct the affinity matrix using a radial basis function
448
+ (RBF) kernel.
449
+ - 'precomputed': interpret ``X`` as a precomputed affinity matrix,
450
+ where larger values indicate greater similarity between instances.
451
+ - 'precomputed_nearest_neighbors': interpret ``X`` as a sparse graph
452
+ of precomputed distances, and construct a binary affinity matrix
453
+ from the ``n_neighbors`` nearest neighbors of each instance.
454
+ - one of the kernels supported by
455
+ :func:`~sklearn.metrics.pairwise.pairwise_kernels`.
456
+
457
+ Only kernels that produce similarity scores (non-negative values that
458
+ increase with similarity) should be used. This property is not checked
459
+ by the clustering algorithm.
460
+
461
+ n_neighbors : int, default=10
462
+ Number of neighbors to use when constructing the affinity matrix using
463
+ the nearest neighbors method. Ignored for ``affinity='rbf'``.
464
+
465
+ eigen_tol : float, default="auto"
466
+ Stopping criterion for eigen decomposition of the Laplacian matrix.
467
+ If `eigen_tol="auto"` then the passed tolerance will depend on the
468
+ `eigen_solver`:
469
+
470
+ - If `eigen_solver="arpack"`, then `eigen_tol=0.0`;
471
+ - If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then
472
+ `eigen_tol=None` which configures the underlying `lobpcg` solver to
473
+ automatically resolve the value according to their heuristics. See,
474
+ :func:`scipy.sparse.linalg.lobpcg` for details.
475
+
476
+ Note that when using `eigen_solver="lobpcg"` or `eigen_solver="amg"`
477
+ values of `eigen_tol<1e-5` may lead to convergence issues and should be
478
+ avoided.
479
+
480
+ .. versionadded:: 1.2
481
+ Added 'auto' option.
482
+
483
+ assign_labels : {'kmeans', 'discretize', 'cluster_qr'}, default='kmeans'
484
+ The strategy for assigning labels in the embedding space. There are three
485
+ ways to assign labels after the Laplacian embedding. k-means is a
486
+ popular choice, but it can be sensitive to initialization.
487
+ Discretization is another approach which is less sensitive to random
488
+ initialization [3]_.
489
+ The cluster_qr method [5]_ directly extracts clusters from eigenvectors
490
+ in spectral clustering. In contrast to k-means and discretization, cluster_qr
491
+ has no tuning parameters and runs no iterations, yet may outperform
492
+ k-means and discretization in terms of both quality and speed.
493
+
494
+ .. versionchanged:: 1.1
495
+ Added new labeling method 'cluster_qr'.
496
+
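+ As a sketch, selecting the QR-based assignment only requires changing this
+ parameter (``X`` as in the Examples section below)::
+
+ SpectralClustering(n_clusters=2, assign_labels='cluster_qr').fit(X)
+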
497
+ degree : float, default=3
498
+ Degree of the polynomial kernel. Ignored by other kernels.
499
+
500
+ coef0 : float, default=1
501
+ Zero coefficient for polynomial and sigmoid kernels.
502
+ Ignored by other kernels.
503
+
504
+ kernel_params : dict of str to any, default=None
505
+ Parameters (keyword arguments) and values for kernel passed as
506
+ callable object. Ignored by other kernels.
507
+
508
+ n_jobs : int, default=None
509
+ The number of parallel jobs to run when `affinity='nearest_neighbors'`
510
+ or `affinity='precomputed_nearest_neighbors'`. The neighbors search
511
+ will be done in parallel.
512
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
513
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
514
+ for more details.
515
+
516
+ verbose : bool, default=False
517
+ Verbosity mode.
518
+
519
+ .. versionadded:: 0.24
520
+
521
+ Attributes
522
+ ----------
523
+ affinity_matrix_ : array-like of shape (n_samples, n_samples)
524
+ Affinity matrix used for clustering. Available only after calling
525
+ ``fit``.
526
+
527
+ labels_ : ndarray of shape (n_samples,)
528
+ Labels of each point.
529
+
530
+ n_features_in_ : int
531
+ Number of features seen during :term:`fit`.
532
+
533
+ .. versionadded:: 0.24
534
+
535
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
536
+ Names of features seen during :term:`fit`. Defined only when `X`
537
+ has feature names that are all strings.
538
+
539
+ .. versionadded:: 1.0
540
+
541
+ See Also
542
+ --------
543
+ sklearn.cluster.KMeans : K-Means clustering.
544
+ sklearn.cluster.DBSCAN : Density-Based Spatial Clustering of
545
+ Applications with Noise.
546
+
547
+ Notes
548
+ -----
549
+ A distance matrix for which 0 indicates identical elements and high values
550
+ indicate very dissimilar elements can be transformed into an affinity /
551
+ similarity matrix that is well-suited for the algorithm by
552
+ applying the Gaussian (aka RBF, heat) kernel::
553
+
554
+ np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
555
+
556
+ where ``delta`` is a free parameter representing the width of the Gaussian
557
+ kernel.
558
+
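+ A minimal sketch of that transformation (using
+ :func:`sklearn.metrics.pairwise_distances` and a median heuristic for
+ ``delta``, both editorial choices rather than requirements of this class)::
+
+ dist_matrix = pairwise_distances(X)
+ delta = np.median(dist_matrix)
+ affinity = np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
+ SpectralClustering(n_clusters=2, affinity='precomputed').fit(affinity)
+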
559
+ An alternative is to take a symmetric version of the k-nearest neighbors
560
+ connectivity matrix of the points.
561
+
562
+ If the pyamg package is installed, choosing ``eigen_solver='amg'`` can
563
+ greatly speed up computation on large, sparse problems.
564
+
565
+ References
566
+ ----------
567
+ .. [1] :doi:`Normalized cuts and image segmentation, 2000
568
+ Jianbo Shi, Jitendra Malik
569
+ <10.1109/34.868688>`
570
+
571
+ .. [2] :doi:`A Tutorial on Spectral Clustering, 2007
572
+ Ulrike von Luxburg
573
+ <10.1007/s11222-007-9033-z>`
574
+
575
+ .. [3] `Multiclass spectral clustering, 2003
576
+ Stella X. Yu, Jianbo Shi
577
+ <https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
578
+
579
+ .. [4] :doi:`Toward the Optimal Preconditioned Eigensolver:
580
+ Locally Optimal Block Preconditioned Conjugate Gradient Method, 2001
581
+ A. V. Knyazev
582
+ SIAM Journal on Scientific Computing 23, no. 2, pp. 517-541.
583
+ <10.1137/S1064827500366124>`
584
+
585
+ .. [5] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019
586
+ Anil Damle, Victor Minden, Lexing Ying
587
+ <10.1093/imaiai/iay008>`
588
+
589
+ Examples
590
+ --------
591
+ >>> from sklearn.cluster import SpectralClustering
592
+ >>> import numpy as np
593
+ >>> X = np.array([[1, 1], [2, 1], [1, 0],
594
+ ... [4, 7], [3, 5], [3, 6]])
595
+ >>> clustering = SpectralClustering(n_clusters=2,
596
+ ... assign_labels='discretize',
597
+ ... random_state=0).fit(X)
598
+ >>> clustering.labels_
599
+ array([1, 1, 1, 0, 0, 0])
600
+ >>> clustering
601
+ SpectralClustering(assign_labels='discretize', n_clusters=2,
602
+ random_state=0)
603
+ """
604
+
605
+ _parameter_constraints: dict = {
606
+ "n_clusters": [Interval(Integral, 1, None, closed="left")],
607
+ "eigen_solver": [StrOptions({"arpack", "lobpcg", "amg"}), None],
608
+ "n_components": [Interval(Integral, 1, None, closed="left"), None],
609
+ "random_state": ["random_state"],
610
+ "n_init": [Interval(Integral, 1, None, closed="left")],
611
+ "gamma": [Interval(Real, 0, None, closed="left")],
612
+ "affinity": [
613
+ callable,
614
+ StrOptions(
615
+ set(KERNEL_PARAMS)
616
+ | {"nearest_neighbors", "precomputed", "precomputed_nearest_neighbors"}
617
+ ),
618
+ ],
619
+ "n_neighbors": [Interval(Integral, 1, None, closed="left")],
620
+ "eigen_tol": [
621
+ Interval(Real, 0.0, None, closed="left"),
622
+ StrOptions({"auto"}),
623
+ ],
624
+ "assign_labels": [StrOptions({"kmeans", "discretize", "cluster_qr"})],
625
+ "degree": [Interval(Real, 0, None, closed="left")],
626
+ "coef0": [Interval(Real, None, None, closed="neither")],
627
+ "kernel_params": [dict, None],
628
+ "n_jobs": [Integral, None],
629
+ "verbose": ["verbose"],
630
+ }
631
+
632
+ def __init__(
633
+ self,
634
+ n_clusters=8,
635
+ *,
636
+ eigen_solver=None,
637
+ n_components=None,
638
+ random_state=None,
639
+ n_init=10,
640
+ gamma=1.0,
641
+ affinity="rbf",
642
+ n_neighbors=10,
643
+ eigen_tol="auto",
644
+ assign_labels="kmeans",
645
+ degree=3,
646
+ coef0=1,
647
+ kernel_params=None,
648
+ n_jobs=None,
649
+ verbose=False,
650
+ ):
651
+ self.n_clusters = n_clusters
652
+ self.eigen_solver = eigen_solver
653
+ self.n_components = n_components
654
+ self.random_state = random_state
655
+ self.n_init = n_init
656
+ self.gamma = gamma
657
+ self.affinity = affinity
658
+ self.n_neighbors = n_neighbors
659
+ self.eigen_tol = eigen_tol
660
+ self.assign_labels = assign_labels
661
+ self.degree = degree
662
+ self.coef0 = coef0
663
+ self.kernel_params = kernel_params
664
+ self.n_jobs = n_jobs
665
+ self.verbose = verbose
666
+
667
+ @_fit_context(prefer_skip_nested_validation=True)
668
+ def fit(self, X, y=None):
669
+ """Perform spectral clustering from features, or affinity matrix.
670
+
671
+ Parameters
672
+ ----------
673
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
674
+ (n_samples, n_samples)
675
+ Training instances to cluster, similarities / affinities between
676
+ instances if ``affinity='precomputed'``, or distances between
677
+ instances if ``affinity='precomputed_nearest_neighbors'``. If a
678
+ sparse matrix is provided in a format other than ``csr_matrix``,
679
+ ``csc_matrix``, or ``coo_matrix``, it will be converted into a
680
+ sparse ``csr_matrix``.
681
+
682
+ y : Ignored
683
+ Not used, present here for API consistency by convention.
684
+
685
+ Returns
686
+ -------
687
+ self : object
688
+ A fitted instance of the estimator.
689
+ """
690
+ X = self._validate_data(
691
+ X,
692
+ accept_sparse=["csr", "csc", "coo"],
693
+ dtype=np.float64,
694
+ ensure_min_samples=2,
695
+ )
696
+ allow_squared = self.affinity in [
697
+ "precomputed",
698
+ "precomputed_nearest_neighbors",
699
+ ]
700
+ if X.shape[0] == X.shape[1] and not allow_squared:
701
+ warnings.warn(
702
+ "The spectral clustering API has changed. ``fit``"
703
+ "now constructs an affinity matrix from data. To use"
704
+ " a custom affinity matrix, "
705
+ "set ``affinity=precomputed``."
706
+ )
707
+
708
+ if self.affinity == "nearest_neighbors":
709
+ connectivity = kneighbors_graph(
710
+ X, n_neighbors=self.n_neighbors, include_self=True, n_jobs=self.n_jobs
711
+ )
712
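+ # Symmetrize the directed kNN graph by averaging it with its transpose so
+ # that an edge found in either direction contributes to the affinity.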
+ self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
713
+ elif self.affinity == "precomputed_nearest_neighbors":
714
+ estimator = NearestNeighbors(
715
+ n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed"
716
+ ).fit(X)
717
+ connectivity = estimator.kneighbors_graph(X=X, mode="connectivity")
718
+ self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
719
+ elif self.affinity == "precomputed":
720
+ self.affinity_matrix_ = X
721
+ else:
722
+ params = self.kernel_params
723
+ if params is None:
724
+ params = {}
725
+ if not callable(self.affinity):
726
+ params["gamma"] = self.gamma
727
+ params["degree"] = self.degree
728
+ params["coef0"] = self.coef0
729
+ self.affinity_matrix_ = pairwise_kernels(
730
+ X, metric=self.affinity, filter_params=True, **params
731
+ )
732
+
733
+ random_state = check_random_state(self.random_state)
734
+ n_components = (
735
+ self.n_clusters if self.n_components is None else self.n_components
736
+ )
737
+ # We now obtain the real valued solution matrix to the
738
+ # relaxed Ncut problem, solving the eigenvalue problem
739
+ # L_sym x = lambda x and recovering u = D^-1/2 x.
740
+ # The first eigenvector is constant only for fully connected graphs
741
+ # and should be kept for spectral clustering (drop_first = False)
742
+ # See spectral_embedding documentation.
743
+ maps = spectral_embedding(
744
+ self.affinity_matrix_,
745
+ n_components=n_components,
746
+ eigen_solver=self.eigen_solver,
747
+ random_state=random_state,
748
+ eigen_tol=self.eigen_tol,
749
+ drop_first=False,
750
+ )
751
+ if self.verbose:
752
+ print(f"Computing label assignment using {self.assign_labels}")
753
+
754
+ if self.assign_labels == "kmeans":
755
+ _, self.labels_, _ = k_means(
756
+ maps,
757
+ self.n_clusters,
758
+ random_state=random_state,
759
+ n_init=self.n_init,
760
+ verbose=self.verbose,
761
+ )
762
+ elif self.assign_labels == "cluster_qr":
763
+ self.labels_ = cluster_qr(maps)
764
+ else:
765
+ self.labels_ = discretize(maps, random_state=random_state)
766
+
767
+ return self
768
+
769
+ def fit_predict(self, X, y=None):
770
+ """Perform spectral clustering on `X` and return cluster labels.
771
+
772
+ Parameters
773
+ ----------
774
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
775
+ (n_samples, n_samples)
776
+ Training instances to cluster, similarities / affinities between
777
+ instances if ``affinity='precomputed'``, or distances between
778
+ instances if ``affinity='precomputed_nearest_neighbors'``. If a
779
+ sparse matrix is provided in a format other than ``csr_matrix``,
780
+ ``csc_matrix``, or ``coo_matrix``, it will be converted into a
781
+ sparse ``csr_matrix``.
782
+
783
+ y : Ignored
784
+ Not used, present here for API consistency by convention.
785
+
786
+ Returns
787
+ -------
788
+ labels : ndarray of shape (n_samples,)
789
+ Cluster labels.
790
+ """
791
+ return super().fit_predict(X, y)
792
+
793
+ def _more_tags(self):
794
+ return {
795
+ "pairwise": self.affinity in [
796
+ "precomputed",
797
+ "precomputed_nearest_neighbors",
798
+ ]
799
+ }
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes).
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/common.cpython-310.pyc ADDED
Binary file (823 Bytes).
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_affinity_propagation.cpython-310.pyc ADDED
Binary file (9.64 kB).
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc ADDED
Binary file (7.73 kB).
env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_birch.cpython-310.pyc ADDED
Binary file (7.2 kB).