diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d3104d816bfb63e2038adb9897b2d15cd0d9c3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__init__.py @@ -0,0 +1,56 @@ +""" +The :mod:`sklearn.cluster` module gathers popular unsupervised clustering +algorithms. +""" + +from ._affinity_propagation import AffinityPropagation, affinity_propagation +from ._agglomerative import ( + AgglomerativeClustering, + FeatureAgglomeration, + linkage_tree, + ward_tree, +) +from ._bicluster import SpectralBiclustering, SpectralCoclustering +from ._birch import Birch +from ._bisect_k_means import BisectingKMeans +from ._dbscan import DBSCAN, dbscan +from ._hdbscan.hdbscan import HDBSCAN +from ._kmeans import KMeans, MiniBatchKMeans, k_means, kmeans_plusplus +from ._mean_shift import MeanShift, estimate_bandwidth, get_bin_seeds, mean_shift +from ._optics import ( + OPTICS, + cluster_optics_dbscan, + cluster_optics_xi, + compute_optics_graph, +) +from ._spectral import SpectralClustering, spectral_clustering + +__all__ = [ + "AffinityPropagation", + "AgglomerativeClustering", + "Birch", + "DBSCAN", + "OPTICS", + "cluster_optics_dbscan", + "cluster_optics_xi", + "compute_optics_graph", + "KMeans", + "BisectingKMeans", + "FeatureAgglomeration", + "MeanShift", + "MiniBatchKMeans", + "SpectralClustering", + "affinity_propagation", + "dbscan", + "estimate_bandwidth", + "get_bin_seeds", + "k_means", + "kmeans_plusplus", + "linkage_tree", + "mean_shift", + "spectral_clustering", + "ward_tree", + "SpectralBiclustering", + "SpectralCoclustering", + "HDBSCAN", +] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5018e4ead741df1d8fab632f3751dbfd74b85188 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_affinity_propagation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_affinity_propagation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f61385cf60b41c2b963cbe0fd4a8423b751da29 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_affinity_propagation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_agglomerative.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_agglomerative.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b25aaaa703a7261f18b335ea63c0057dd1739426 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_agglomerative.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bicluster.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bicluster.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b96b51b65937d65d47f0fc7698da714ef016e9e Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bicluster.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_birch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_birch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..666a9cf043b53a06f13c67af64743ff4501429bb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_birch.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bisect_k_means.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bisect_k_means.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6c42a9a65e83fd3c01f178b66aac0043197cb92 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bisect_k_means.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_dbscan.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_dbscan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f34ca13112d11b5bd63ab3ed96d8682560de3a55 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_dbscan.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_feature_agglomeration.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_feature_agglomeration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..353559db7ef5adfa1a112e7bf397151cb7d1cd67 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_feature_agglomeration.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_kmeans.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_kmeans.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc6d2b3313e352d5fe7f95781142dc7507ac3d78 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_kmeans.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_mean_shift.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_mean_shift.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5541d06a7d1a21866faa30db3d85d1eb34e47b92 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_mean_shift.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_optics.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_optics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0650a3310d464bf87e03f25d3b24b5dbb07dccc2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_optics.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_spectral.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_spectral.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..57cdc719258c971c5b059fd02a3c3ac3ed161dda Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_spectral.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_affinity_propagation.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_affinity_propagation.py new file mode 100644 index 0000000000000000000000000000000000000000..735e30d3ea4b29f65a29a297ad525fd2780001b4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_affinity_propagation.py @@ -0,0 +1,604 @@ +"""Affinity Propagation clustering algorithm.""" + +# Author: Alexandre Gramfort alexandre.gramfort@inria.fr +# Gael Varoquaux gael.varoquaux@normalesup.org + +# License: BSD 3 clause + +import warnings +from numbers import Integral, Real + +import numpy as np + +from .._config import config_context +from ..base import BaseEstimator, ClusterMixin, _fit_context +from ..exceptions import ConvergenceWarning +from ..metrics import euclidean_distances, pairwise_distances_argmin +from ..utils import check_random_state +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.validation import check_is_fitted + + +def _equal_similarities_and_preferences(S, preference): + def all_equal_preferences(): + return np.all(preference == preference.flat[0]) + + def all_equal_similarities(): + # Create mask to ignore diagonal of S + mask = np.ones(S.shape, dtype=bool) + np.fill_diagonal(mask, 0) + + return np.all(S[mask].flat == S[mask].flat[0]) + + return all_equal_preferences() and all_equal_similarities() + + +def _affinity_propagation( + S, + *, + preference, + convergence_iter, + max_iter, + damping, + verbose, + return_n_iter, + random_state, +): + """Main affinity propagation algorithm.""" + n_samples = S.shape[0] + if n_samples == 1 or _equal_similarities_and_preferences(S, preference): + # It makes no sense to run the algorithm in this case, so return 1 or + # n_samples clusters, depending on preferences + warnings.warn( + "All samples have mutually equal similarities. " + "Returning arbitrary cluster center(s)." 
+ ) + if preference.flat[0] > S.flat[n_samples - 1]: + return ( + (np.arange(n_samples), np.arange(n_samples), 0) + if return_n_iter + else (np.arange(n_samples), np.arange(n_samples)) + ) + else: + return ( + (np.array([0]), np.array([0] * n_samples), 0) + if return_n_iter + else (np.array([0]), np.array([0] * n_samples)) + ) + + # Place preference on the diagonal of S + S.flat[:: (n_samples + 1)] = preference + + A = np.zeros((n_samples, n_samples)) + R = np.zeros((n_samples, n_samples)) # Initialize messages + # Intermediate results + tmp = np.zeros((n_samples, n_samples)) + + # Remove degeneracies + S += ( + np.finfo(S.dtype).eps * S + np.finfo(S.dtype).tiny * 100 + ) * random_state.standard_normal(size=(n_samples, n_samples)) + + # Execute parallel affinity propagation updates + e = np.zeros((n_samples, convergence_iter)) + + ind = np.arange(n_samples) + + for it in range(max_iter): + # tmp = A + S; compute responsibilities + np.add(A, S, tmp) + I = np.argmax(tmp, axis=1) + Y = tmp[ind, I] # np.max(A + S, axis=1) + tmp[ind, I] = -np.inf + Y2 = np.max(tmp, axis=1) + + # tmp = Rnew + np.subtract(S, Y[:, None], tmp) + tmp[ind, I] = S[ind, I] - Y2 + + # Damping + tmp *= 1 - damping + R *= damping + R += tmp + + # tmp = Rp; compute availabilities + np.maximum(R, 0, tmp) + tmp.flat[:: n_samples + 1] = R.flat[:: n_samples + 1] + + # tmp = -Anew + tmp -= np.sum(tmp, axis=0) + dA = np.diag(tmp).copy() + tmp.clip(0, np.inf, tmp) + tmp.flat[:: n_samples + 1] = dA + + # Damping + tmp *= 1 - damping + A *= damping + A -= tmp + + # Check for convergence + E = (np.diag(A) + np.diag(R)) > 0 + e[:, it % convergence_iter] = E + K = np.sum(E, axis=0) + + if it >= convergence_iter: + se = np.sum(e, axis=1) + unconverged = np.sum((se == convergence_iter) + (se == 0)) != n_samples + if (not unconverged and (K > 0)) or (it == max_iter): + never_converged = False + if verbose: + print("Converged after %d iterations." % it) + break + else: + never_converged = True + if verbose: + print("Did not converge") + + I = np.flatnonzero(E) + K = I.size # Identify exemplars + + if K > 0: + if never_converged: + warnings.warn( + ( + "Affinity propagation did not converge, this model " + "may return degenerate cluster centers and labels." + ), + ConvergenceWarning, + ) + c = np.argmax(S[:, I], axis=1) + c[I] = np.arange(K) # Identify clusters + # Refine the final set of exemplars and clusters and return results + for k in range(K): + ii = np.where(c == k)[0] + j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0)) + I[k] = ii[j] + + c = np.argmax(S[:, I], axis=1) + c[I] = np.arange(K) + labels = I[c] + # Reduce labels to a sorted, gapless, list + cluster_centers_indices = np.unique(labels) + labels = np.searchsorted(cluster_centers_indices, labels) + else: + warnings.warn( + ( + "Affinity propagation did not converge and this model " + "will not have any cluster centers." 
+ ), + ConvergenceWarning, + ) + labels = np.array([-1] * n_samples) + cluster_centers_indices = [] + + if return_n_iter: + return cluster_centers_indices, labels, it + 1 + else: + return cluster_centers_indices, labels + + +############################################################################### +# Public API + + +@validate_params( + { + "S": ["array-like"], + "return_n_iter": ["boolean"], + }, + prefer_skip_nested_validation=False, +) +def affinity_propagation( + S, + *, + preference=None, + convergence_iter=15, + max_iter=200, + damping=0.5, + copy=True, + verbose=False, + return_n_iter=False, + random_state=None, +): + """Perform Affinity Propagation Clustering of data. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + S : array-like of shape (n_samples, n_samples) + Matrix of similarities between points. + + preference : array-like of shape (n_samples,) or float, default=None + Preferences for each point - points with larger values of + preferences are more likely to be chosen as exemplars. The number of + exemplars, i.e. of clusters, is influenced by the input preferences + value. If the preferences are not passed as arguments, they will be + set to the median of the input similarities (resulting in a moderate + number of clusters). For a smaller amount of clusters, this can be set + to the minimum value of the similarities. + + convergence_iter : int, default=15 + Number of iterations with no change in the number + of estimated clusters that stops the convergence. + + max_iter : int, default=200 + Maximum number of iterations. + + damping : float, default=0.5 + Damping factor between 0.5 and 1. + + copy : bool, default=True + If copy is False, the affinity matrix is modified inplace by the + algorithm, for memory efficiency. + + verbose : bool, default=False + The verbosity level. + + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + random_state : int, RandomState instance or None, default=None + Pseudo-random number generator to control the starting state. + Use an int for reproducible results across function calls. + See the :term:`Glossary `. + + .. versionadded:: 0.23 + this parameter was previously hardcoded as 0. + + Returns + ------- + cluster_centers_indices : ndarray of shape (n_clusters,) + Index of clusters centers. + + labels : ndarray of shape (n_samples,) + Cluster labels for each point. + + n_iter : int + Number of iterations run. Returned only if `return_n_iter` is + set to True. + + Notes + ----- + For an example, see :ref:`examples/cluster/plot_affinity_propagation.py + `. + + When the algorithm does not converge, it will still return a arrays of + ``cluster_center_indices`` and labels if there are any exemplars/clusters, + however they may be degenerate and should be used with caution. + + When all training samples have equal similarities and equal preferences, + the assignment of cluster centers and labels depends on the preference. + If the preference is smaller than the similarities, a single cluster center + and label ``0`` for every sample will be returned. Otherwise, every + training sample becomes its own cluster center and is assigned a unique + label. + + References + ---------- + Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages + Between Data Points", Science Feb. 
2007 + + Examples + -------- + >>> import numpy as np + >>> from sklearn.cluster import affinity_propagation + >>> from sklearn.metrics.pairwise import euclidean_distances + >>> X = np.array([[1, 2], [1, 4], [1, 0], + ... [4, 2], [4, 4], [4, 0]]) + >>> S = -euclidean_distances(X, squared=True) + >>> cluster_centers_indices, labels = affinity_propagation(S, random_state=0) + >>> cluster_centers_indices + array([0, 3]) + >>> labels + array([0, 0, 0, 1, 1, 1]) + """ + estimator = AffinityPropagation( + damping=damping, + max_iter=max_iter, + convergence_iter=convergence_iter, + copy=copy, + preference=preference, + affinity="precomputed", + verbose=verbose, + random_state=random_state, + ).fit(S) + + if return_n_iter: + return estimator.cluster_centers_indices_, estimator.labels_, estimator.n_iter_ + return estimator.cluster_centers_indices_, estimator.labels_ + + +class AffinityPropagation(ClusterMixin, BaseEstimator): + """Perform Affinity Propagation Clustering of data. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + damping : float, default=0.5 + Damping factor in the range `[0.5, 1.0)` is the extent to + which the current value is maintained relative to + incoming values (weighted 1 - damping). This in order + to avoid numerical oscillations when updating these + values (messages). + + max_iter : int, default=200 + Maximum number of iterations. + + convergence_iter : int, default=15 + Number of iterations with no change in the number + of estimated clusters that stops the convergence. + + copy : bool, default=True + Make a copy of input data. + + preference : array-like of shape (n_samples,) or float, default=None + Preferences for each point - points with larger values of + preferences are more likely to be chosen as exemplars. The number + of exemplars, ie of clusters, is influenced by the input + preferences value. If the preferences are not passed as arguments, + they will be set to the median of the input similarities. + + affinity : {'euclidean', 'precomputed'}, default='euclidean' + Which affinity to use. At the moment 'precomputed' and + ``euclidean`` are supported. 'euclidean' uses the + negative squared euclidean distance between points. + + verbose : bool, default=False + Whether to be verbose. + + random_state : int, RandomState instance or None, default=None + Pseudo-random number generator to control the starting state. + Use an int for reproducible results across function calls. + See the :term:`Glossary `. + + .. versionadded:: 0.23 + this parameter was previously hardcoded as 0. + + Attributes + ---------- + cluster_centers_indices_ : ndarray of shape (n_clusters,) + Indices of cluster centers. + + cluster_centers_ : ndarray of shape (n_clusters, n_features) + Cluster centers (if affinity != ``precomputed``). + + labels_ : ndarray of shape (n_samples,) + Labels of each point. + + affinity_matrix_ : ndarray of shape (n_samples, n_samples) + Stores the affinity matrix used in ``fit``. + + n_iter_ : int + Number of iterations taken to converge. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + AgglomerativeClustering : Recursively merges the pair of + clusters that minimally increases a given linkage distance. 
+ FeatureAgglomeration : Similar to AgglomerativeClustering, + but recursively merges features instead of samples. + KMeans : K-Means clustering. + MiniBatchKMeans : Mini-Batch K-Means clustering. + MeanShift : Mean shift clustering using a flat kernel. + SpectralClustering : Apply clustering to a projection + of the normalized Laplacian. + + Notes + ----- + For an example, see :ref:`examples/cluster/plot_affinity_propagation.py + `. + + The algorithmic complexity of affinity propagation is quadratic + in the number of points. + + When the algorithm does not converge, it will still return a arrays of + ``cluster_center_indices`` and labels if there are any exemplars/clusters, + however they may be degenerate and should be used with caution. + + When ``fit`` does not converge, ``cluster_centers_`` is still populated + however it may be degenerate. In such a case, proceed with caution. + If ``fit`` does not converge and fails to produce any ``cluster_centers_`` + then ``predict`` will label every sample as ``-1``. + + When all training samples have equal similarities and equal preferences, + the assignment of cluster centers and labels depends on the preference. + If the preference is smaller than the similarities, ``fit`` will result in + a single cluster center and label ``0`` for every sample. Otherwise, every + training sample becomes its own cluster center and is assigned a unique + label. + + References + ---------- + + Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages + Between Data Points", Science Feb. 2007 + + Examples + -------- + >>> from sklearn.cluster import AffinityPropagation + >>> import numpy as np + >>> X = np.array([[1, 2], [1, 4], [1, 0], + ... [4, 2], [4, 4], [4, 0]]) + >>> clustering = AffinityPropagation(random_state=5).fit(X) + >>> clustering + AffinityPropagation(random_state=5) + >>> clustering.labels_ + array([0, 0, 0, 1, 1, 1]) + >>> clustering.predict([[0, 0], [4, 4]]) + array([0, 1]) + >>> clustering.cluster_centers_ + array([[1, 2], + [4, 2]]) + """ + + _parameter_constraints: dict = { + "damping": [Interval(Real, 0.5, 1.0, closed="left")], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "convergence_iter": [Interval(Integral, 1, None, closed="left")], + "copy": ["boolean"], + "preference": [ + "array-like", + Interval(Real, None, None, closed="neither"), + None, + ], + "affinity": [StrOptions({"euclidean", "precomputed"})], + "verbose": ["verbose"], + "random_state": ["random_state"], + } + + def __init__( + self, + *, + damping=0.5, + max_iter=200, + convergence_iter=15, + copy=True, + preference=None, + affinity="euclidean", + verbose=False, + random_state=None, + ): + self.damping = damping + self.max_iter = max_iter + self.convergence_iter = convergence_iter + self.copy = copy + self.verbose = verbose + self.preference = preference + self.affinity = affinity + self.random_state = random_state + + def _more_tags(self): + return {"pairwise": self.affinity == "precomputed"} + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the clustering from features, or affinity matrix. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), or \ + array-like of shape (n_samples, n_samples) + Training instances to cluster, or similarities / affinities between + instances if ``affinity='precomputed'``. If a sparse feature matrix + is provided, it will be converted into a sparse ``csr_matrix``. 
+ + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self + Returns the instance itself. + """ + if self.affinity == "precomputed": + accept_sparse = False + else: + accept_sparse = "csr" + X = self._validate_data(X, accept_sparse=accept_sparse) + if self.affinity == "precomputed": + self.affinity_matrix_ = X.copy() if self.copy else X + else: # self.affinity == "euclidean" + self.affinity_matrix_ = -euclidean_distances(X, squared=True) + + if self.affinity_matrix_.shape[0] != self.affinity_matrix_.shape[1]: + raise ValueError( + "The matrix of similarities must be a square array. " + f"Got {self.affinity_matrix_.shape} instead." + ) + + if self.preference is None: + preference = np.median(self.affinity_matrix_) + else: + preference = self.preference + preference = np.asarray(preference) + + random_state = check_random_state(self.random_state) + + ( + self.cluster_centers_indices_, + self.labels_, + self.n_iter_, + ) = _affinity_propagation( + self.affinity_matrix_, + max_iter=self.max_iter, + convergence_iter=self.convergence_iter, + preference=preference, + damping=self.damping, + verbose=self.verbose, + return_n_iter=True, + random_state=random_state, + ) + + if self.affinity != "precomputed": + self.cluster_centers_ = X[self.cluster_centers_indices_].copy() + + return self + + def predict(self, X): + """Predict the closest cluster each sample in X belongs to. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data to predict. If a sparse matrix is provided, it will be + converted into a sparse ``csr_matrix``. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Cluster labels. + """ + check_is_fitted(self) + X = self._validate_data(X, reset=False, accept_sparse="csr") + if not hasattr(self, "cluster_centers_"): + raise ValueError( + "Predict method is not supported when affinity='precomputed'." + ) + + if self.cluster_centers_.shape[0] > 0: + with config_context(assume_finite=True): + return pairwise_distances_argmin(X, self.cluster_centers_) + else: + warnings.warn( + ( + "This model does not have any cluster centers " + "because affinity propagation did not converge. " + "Labeling every sample as '-1'." + ), + ConvergenceWarning, + ) + return np.array([-1] * X.shape[0]) + + def fit_predict(self, X, y=None): + """Fit clustering from features/affinity matrix; return cluster labels. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), or \ + array-like of shape (n_samples, n_samples) + Training instances to cluster, or similarities / affinities between + instances if ``affinity='precomputed'``. If a sparse feature matrix + is provided, it will be converted into a sparse ``csr_matrix``. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Cluster labels. + """ + return super().fit_predict(X, y) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_agglomerative.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_agglomerative.py new file mode 100644 index 0000000000000000000000000000000000000000..884d1605e70c3b3b3936fce956d3fb7f55ff449a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_agglomerative.py @@ -0,0 +1,1336 @@ +"""Hierarchical Agglomerative Clustering + +These routines perform some hierarchical agglomerative clustering of some +input data. 
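A minimal usage sketch of the affinity propagation module added above, assuming the vendored scikit-learn package is importable as `sklearn`; the data mirrors the doctests in that file, and the function form simply wraps the estimator with `affinity='precomputed'`.

    import numpy as np
    from sklearn.cluster import AffinityPropagation, affinity_propagation
    from sklearn.metrics.pairwise import euclidean_distances

    X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]])

    # Estimator form: with affinity='euclidean' (the default) the similarity
    # matrix is computed internally as the negative squared Euclidean distance.
    ap = AffinityPropagation(random_state=5).fit(X)
    print(ap.labels_)                   # e.g. [0 0 0 1 1 1]
    print(ap.cluster_centers_indices_)  # indices of the chosen exemplars

    # Function form: the caller passes the similarity matrix directly; this is
    # what the estimator builds internally for affinity='euclidean'.
    S = -euclidean_distances(X, squared=True)
    centers, labels = affinity_propagation(S, random_state=0)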
+ +Authors : Vincent Michel, Bertrand Thirion, Alexandre Gramfort, + Gael Varoquaux +License: BSD 3 clause +""" +import warnings +from heapq import heapify, heappop, heappush, heappushpop +from numbers import Integral, Real + +import numpy as np +from scipy import sparse +from scipy.sparse.csgraph import connected_components + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + ClusterMixin, + _fit_context, +) +from ..metrics import DistanceMetric +from ..metrics._dist_metrics import METRIC_MAPPING64 +from ..metrics.pairwise import _VALID_METRICS, paired_distances +from ..utils import check_array +from ..utils._fast_dict import IntFloatDict +from ..utils._param_validation import ( + HasMethods, + Hidden, + Interval, + StrOptions, + validate_params, +) +from ..utils.graph import _fix_connected_components +from ..utils.validation import check_memory + +# mypy error: Module 'sklearn.cluster' has no attribute '_hierarchical_fast' +from . import _hierarchical_fast as _hierarchical # type: ignore +from ._feature_agglomeration import AgglomerationTransform + +############################################################################### +# For non fully-connected graphs + + +def _fix_connectivity(X, connectivity, affinity): + """ + Fixes the connectivity matrix. + + The different steps are: + + - copies it + - makes it symmetric + - converts it to LIL if necessary + - completes it if necessary. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Feature matrix representing `n_samples` samples to be clustered. + + connectivity : sparse matrix, default=None + Connectivity matrix. Defines for each sample the neighboring samples + following a given structure of the data. The matrix is assumed to + be symmetric and only the upper triangular half is used. + Default is `None`, i.e, the Ward algorithm is unstructured. + + affinity : {"euclidean", "precomputed"}, default="euclidean" + Which affinity to use. At the moment `precomputed` and + ``euclidean`` are supported. `euclidean` uses the + negative squared Euclidean distance between points. + + Returns + ------- + connectivity : sparse matrix + The fixed connectivity matrix. + + n_connected_components : int + The number of connected components in the graph. + """ + n_samples = X.shape[0] + if connectivity.shape[0] != n_samples or connectivity.shape[1] != n_samples: + raise ValueError( + "Wrong shape for connectivity matrix: %s when X is %s" + % (connectivity.shape, X.shape) + ) + + # Make the connectivity matrix symmetric: + connectivity = connectivity + connectivity.T + + # Convert connectivity matrix to LIL + if not sparse.issparse(connectivity): + connectivity = sparse.lil_matrix(connectivity) + + # `connectivity` is a sparse matrix at this point + if connectivity.format != "lil": + connectivity = connectivity.tolil() + + # Compute the number of nodes + n_connected_components, labels = connected_components(connectivity) + + if n_connected_components > 1: + warnings.warn( + "the number of connected components of the " + "connectivity matrix is %d > 1. Completing it to avoid " + "stopping the tree early." % n_connected_components, + stacklevel=2, + ) + # XXX: Can we do without completing the matrix? 
+ connectivity = _fix_connected_components( + X=X, + graph=connectivity, + n_connected_components=n_connected_components, + component_labels=labels, + metric=affinity, + mode="connectivity", + ) + + return connectivity, n_connected_components + + +def _single_linkage_tree( + connectivity, + n_samples, + n_nodes, + n_clusters, + n_connected_components, + return_distance, +): + """ + Perform single linkage clustering on sparse data via the minimum + spanning tree from scipy.sparse.csgraph, then using union-find to label. + The parent array is then generated by walking through the tree. + """ + from scipy.sparse.csgraph import minimum_spanning_tree + + # explicitly cast connectivity to ensure safety + connectivity = connectivity.astype(np.float64, copy=False) + + # Ensure zero distances aren't ignored by setting them to "epsilon" + epsilon_value = np.finfo(dtype=connectivity.data.dtype).eps + connectivity.data[connectivity.data == 0] = epsilon_value + + # Use scipy.sparse.csgraph to generate a minimum spanning tree + mst = minimum_spanning_tree(connectivity.tocsr()) + + # Convert the graph to scipy.cluster.hierarchy array format + mst = mst.tocoo() + + # Undo the epsilon values + mst.data[mst.data == epsilon_value] = 0 + + mst_array = np.vstack([mst.row, mst.col, mst.data]).T + + # Sort edges of the min_spanning_tree by weight + mst_array = mst_array[np.argsort(mst_array.T[2], kind="mergesort"), :] + + # Convert edge list into standard hierarchical clustering format + single_linkage_tree = _hierarchical._single_linkage_label(mst_array) + children_ = single_linkage_tree[:, :2].astype(int) + + # Compute parents + parent = np.arange(n_nodes, dtype=np.intp) + for i, (left, right) in enumerate(children_, n_samples): + if n_clusters is not None and i >= n_nodes: + break + if left < n_nodes: + parent[left] = i + if right < n_nodes: + parent[right] = i + + if return_distance: + distances = single_linkage_tree[:, 2] + return children_, n_connected_components, n_samples, parent, distances + return children_, n_connected_components, n_samples, parent + + +############################################################################### +# Hierarchical tree building functions + + +@validate_params( + { + "X": ["array-like"], + "connectivity": ["array-like", "sparse matrix", None], + "n_clusters": [Interval(Integral, 1, None, closed="left"), None], + "return_distance": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def ward_tree(X, *, connectivity=None, n_clusters=None, return_distance=False): + """Ward clustering based on a Feature matrix. + + Recursively merges the pair of clusters that minimally increases + within-cluster variance. + + The inertia matrix uses a Heapq-based representation. + + This is the structured version, that takes into account some topological + structure between samples. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Feature matrix representing `n_samples` samples to be clustered. + + connectivity : {array-like, sparse matrix}, default=None + Connectivity matrix. Defines for each sample the neighboring samples + following a given structure of the data. The matrix is assumed to + be symmetric and only the upper triangular half is used. + Default is None, i.e, the Ward algorithm is unstructured. + + n_clusters : int, default=None + `n_clusters` should be less than `n_samples`. 
Stop early the + construction of the tree at `n_clusters.` This is useful to decrease + computation time if the number of clusters is not small compared to the + number of samples. In this case, the complete tree is not computed, thus + the 'children' output is of limited use, and the 'parents' output should + rather be used. This option is valid only when specifying a connectivity + matrix. + + return_distance : bool, default=False + If `True`, return the distance between the clusters. + + Returns + ------- + children : ndarray of shape (n_nodes-1, 2) + The children of each non-leaf node. Values less than `n_samples` + correspond to leaves of the tree which are the original samples. + A node `i` greater than or equal to `n_samples` is a non-leaf + node and has children `children_[i - n_samples]`. Alternatively + at the i-th iteration, children[i][0] and children[i][1] + are merged to form node `n_samples + i`. + + n_connected_components : int + The number of connected components in the graph. + + n_leaves : int + The number of leaves in the tree. + + parents : ndarray of shape (n_nodes,) or None + The parent of each node. Only returned when a connectivity matrix + is specified, elsewhere 'None' is returned. + + distances : ndarray of shape (n_nodes-1,) + Only returned if `return_distance` is set to `True` (for compatibility). + The distances between the centers of the nodes. `distances[i]` + corresponds to a weighted Euclidean distance between + the nodes `children[i, 1]` and `children[i, 2]`. If the nodes refer to + leaves of the tree, then `distances[i]` is their unweighted Euclidean + distance. Distances are updated in the following way + (from scipy.hierarchy.linkage): + + The new entry :math:`d(u,v)` is computed as follows, + + .. math:: + + d(u,v) = \\sqrt{\\frac{|v|+|s|} + {T}d(v,s)^2 + + \\frac{|v|+|t|} + {T}d(v,t)^2 + - \\frac{|v|} + {T}d(s,t)^2} + + where :math:`u` is the newly joined cluster consisting of + clusters :math:`s` and :math:`t`, :math:`v` is an unused + cluster in the forest, :math:`T=|v|+|s|+|t|`, and + :math:`|*|` is the cardinality of its argument. This is also + known as the incremental algorithm. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.cluster import ward_tree + >>> X = np.array([[1, 2], [1, 4], [1, 0], + ... [4, 2], [4, 4], [4, 0]]) + >>> children, n_connected_components, n_leaves, parents = ward_tree(X) + >>> children + array([[0, 1], + [3, 5], + [2, 6], + [4, 7], + [8, 9]]) + >>> n_connected_components + 1 + >>> n_leaves + 6 + """ + X = np.asarray(X) + if X.ndim == 1: + X = np.reshape(X, (-1, 1)) + n_samples, n_features = X.shape + + if connectivity is None: + from scipy.cluster import hierarchy # imports PIL + + if n_clusters is not None: + warnings.warn( + ( + "Partial build of the tree is implemented " + "only for structured clustering (i.e. with " + "explicit connectivity). 
The algorithm " + "will build the full tree and only " + "retain the lower branches required " + "for the specified number of clusters" + ), + stacklevel=2, + ) + X = np.require(X, requirements="W") + out = hierarchy.ward(X) + children_ = out[:, :2].astype(np.intp) + + if return_distance: + distances = out[:, 2] + return children_, 1, n_samples, None, distances + else: + return children_, 1, n_samples, None + + connectivity, n_connected_components = _fix_connectivity( + X, connectivity, affinity="euclidean" + ) + if n_clusters is None: + n_nodes = 2 * n_samples - 1 + else: + if n_clusters > n_samples: + raise ValueError( + "Cannot provide more clusters than samples. " + "%i n_clusters was asked, and there are %i " + "samples." % (n_clusters, n_samples) + ) + n_nodes = 2 * n_samples - n_clusters + + # create inertia matrix + coord_row = [] + coord_col = [] + A = [] + for ind, row in enumerate(connectivity.rows): + A.append(row) + # We keep only the upper triangular for the moments + # Generator expressions are faster than arrays on the following + row = [i for i in row if i < ind] + coord_row.extend( + len(row) + * [ + ind, + ] + ) + coord_col.extend(row) + + coord_row = np.array(coord_row, dtype=np.intp, order="C") + coord_col = np.array(coord_col, dtype=np.intp, order="C") + + # build moments as a list + moments_1 = np.zeros(n_nodes, order="C") + moments_1[:n_samples] = 1 + moments_2 = np.zeros((n_nodes, n_features), order="C") + moments_2[:n_samples] = X + inertia = np.empty(len(coord_row), dtype=np.float64, order="C") + _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, inertia) + inertia = list(zip(inertia, coord_row, coord_col)) + heapify(inertia) + + # prepare the main fields + parent = np.arange(n_nodes, dtype=np.intp) + used_node = np.ones(n_nodes, dtype=bool) + children = [] + if return_distance: + distances = np.empty(n_nodes - n_samples) + + not_visited = np.empty(n_nodes, dtype=bool, order="C") + + # recursive merge loop + for k in range(n_samples, n_nodes): + # identify the merge + while True: + inert, i, j = heappop(inertia) + if used_node[i] and used_node[j]: + break + parent[i], parent[j] = k, k + children.append((i, j)) + used_node[i] = used_node[j] = False + if return_distance: # store inertia value + distances[k - n_samples] = inert + + # update the moments + moments_1[k] = moments_1[i] + moments_1[j] + moments_2[k] = moments_2[i] + moments_2[j] + + # update the structure matrix A and the inertia matrix + coord_col = [] + not_visited.fill(1) + not_visited[k] = 0 + _hierarchical._get_parents(A[i], coord_col, parent, not_visited) + _hierarchical._get_parents(A[j], coord_col, parent, not_visited) + # List comprehension is faster than a for loop + [A[col].append(k) for col in coord_col] + A.append(coord_col) + coord_col = np.array(coord_col, dtype=np.intp, order="C") + coord_row = np.empty(coord_col.shape, dtype=np.intp, order="C") + coord_row.fill(k) + n_additions = len(coord_row) + ini = np.empty(n_additions, dtype=np.float64, order="C") + + _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, ini) + + # List comprehension is faster than a for loop + [heappush(inertia, (ini[idx], k, coord_col[idx])) for idx in range(n_additions)] + + # Separate leaves in children (empty lists up to now) + n_leaves = n_samples + # sort children to get consistent output with unstructured version + children = [c[::-1] for c in children] + children = np.array(children) # return numpy array for efficient caching + + if return_distance: + # 2 is 
scaling factor to compare w/ unstructured version + distances = np.sqrt(2.0 * distances) + return children, n_connected_components, n_leaves, parent, distances + else: + return children, n_connected_components, n_leaves, parent + + +# single average and complete linkage +def linkage_tree( + X, + connectivity=None, + n_clusters=None, + linkage="complete", + affinity="euclidean", + return_distance=False, +): + """Linkage agglomerative clustering based on a Feature matrix. + + The inertia matrix uses a Heapq-based representation. + + This is the structured version, that takes into account some topological + structure between samples. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Feature matrix representing `n_samples` samples to be clustered. + + connectivity : sparse matrix, default=None + Connectivity matrix. Defines for each sample the neighboring samples + following a given structure of the data. The matrix is assumed to + be symmetric and only the upper triangular half is used. + Default is `None`, i.e, the Ward algorithm is unstructured. + + n_clusters : int, default=None + Stop early the construction of the tree at `n_clusters`. This is + useful to decrease computation time if the number of clusters is + not small compared to the number of samples. In this case, the + complete tree is not computed, thus the 'children' output is of + limited use, and the 'parents' output should rather be used. + This option is valid only when specifying a connectivity matrix. + + linkage : {"average", "complete", "single"}, default="complete" + Which linkage criteria to use. The linkage criterion determines which + distance to use between sets of observation. + - "average" uses the average of the distances of each observation of + the two sets. + - "complete" or maximum linkage uses the maximum distances between + all observations of the two sets. + - "single" uses the minimum of the distances between all + observations of the two sets. + + affinity : str or callable, default='euclidean' + Which metric to use. Can be 'euclidean', 'manhattan', or any + distance known to paired distance (see metric.pairwise). + + return_distance : bool, default=False + Whether or not to return the distances between the clusters. + + Returns + ------- + children : ndarray of shape (n_nodes-1, 2) + The children of each non-leaf node. Values less than `n_samples` + correspond to leaves of the tree which are the original samples. + A node `i` greater than or equal to `n_samples` is a non-leaf + node and has children `children_[i - n_samples]`. Alternatively + at the i-th iteration, children[i][0] and children[i][1] + are merged to form node `n_samples + i`. + + n_connected_components : int + The number of connected components in the graph. + + n_leaves : int + The number of leaves in the tree. + + parents : ndarray of shape (n_nodes, ) or None + The parent of each node. Only returned when a connectivity matrix + is specified, elsewhere 'None' is returned. + + distances : ndarray of shape (n_nodes-1,) + Returned when `return_distance` is set to `True`. + + distances[i] refers to the distance between children[i][0] and + children[i][1] when they are merged. + + See Also + -------- + ward_tree : Hierarchical clustering with ward linkage. 
+ """ + X = np.asarray(X) + if X.ndim == 1: + X = np.reshape(X, (-1, 1)) + n_samples, n_features = X.shape + + linkage_choices = { + "complete": _hierarchical.max_merge, + "average": _hierarchical.average_merge, + "single": None, + } # Single linkage is handled differently + try: + join_func = linkage_choices[linkage] + except KeyError as e: + raise ValueError( + "Unknown linkage option, linkage should be one of %s, but %s was given" + % (linkage_choices.keys(), linkage) + ) from e + + if affinity == "cosine" and np.any(~np.any(X, axis=1)): + raise ValueError("Cosine affinity cannot be used when X contains zero vectors") + + if connectivity is None: + from scipy.cluster import hierarchy # imports PIL + + if n_clusters is not None: + warnings.warn( + ( + "Partial build of the tree is implemented " + "only for structured clustering (i.e. with " + "explicit connectivity). The algorithm " + "will build the full tree and only " + "retain the lower branches required " + "for the specified number of clusters" + ), + stacklevel=2, + ) + + if affinity == "precomputed": + # for the linkage function of hierarchy to work on precomputed + # data, provide as first argument an ndarray of the shape returned + # by sklearn.metrics.pairwise_distances. + if X.shape[0] != X.shape[1]: + raise ValueError( + f"Distance matrix should be square, got matrix of shape {X.shape}" + ) + i, j = np.triu_indices(X.shape[0], k=1) + X = X[i, j] + elif affinity == "l2": + # Translate to something understood by scipy + affinity = "euclidean" + elif affinity in ("l1", "manhattan"): + affinity = "cityblock" + elif callable(affinity): + X = affinity(X) + i, j = np.triu_indices(X.shape[0], k=1) + X = X[i, j] + if ( + linkage == "single" + and affinity != "precomputed" + and not callable(affinity) + and affinity in METRIC_MAPPING64 + ): + # We need the fast cythonized metric from neighbors + dist_metric = DistanceMetric.get_metric(affinity) + + # The Cython routines used require contiguous arrays + X = np.ascontiguousarray(X, dtype=np.double) + + mst = _hierarchical.mst_linkage_core(X, dist_metric) + # Sort edges of the min_spanning_tree by weight + mst = mst[np.argsort(mst.T[2], kind="mergesort"), :] + + # Convert edge list into standard hierarchical clustering format + out = _hierarchical.single_linkage_label(mst) + else: + out = hierarchy.linkage(X, method=linkage, metric=affinity) + children_ = out[:, :2].astype(int, copy=False) + + if return_distance: + distances = out[:, 2] + return children_, 1, n_samples, None, distances + return children_, 1, n_samples, None + + connectivity, n_connected_components = _fix_connectivity( + X, connectivity, affinity=affinity + ) + connectivity = connectivity.tocoo() + # Put the diagonal to zero + diag_mask = connectivity.row != connectivity.col + connectivity.row = connectivity.row[diag_mask] + connectivity.col = connectivity.col[diag_mask] + connectivity.data = connectivity.data[diag_mask] + del diag_mask + + if affinity == "precomputed": + distances = X[connectivity.row, connectivity.col].astype(np.float64, copy=False) + else: + # FIXME We compute all the distances, while we could have only computed + # the "interesting" distances + distances = paired_distances( + X[connectivity.row], X[connectivity.col], metric=affinity + ) + connectivity.data = distances + + if n_clusters is None: + n_nodes = 2 * n_samples - 1 + else: + assert n_clusters <= n_samples + n_nodes = 2 * n_samples - n_clusters + + if linkage == "single": + return _single_linkage_tree( + connectivity, + n_samples, + n_nodes, + 
n_clusters, + n_connected_components, + return_distance, + ) + + if return_distance: + distances = np.empty(n_nodes - n_samples) + # create inertia heap and connection matrix + A = np.empty(n_nodes, dtype=object) + inertia = list() + + # LIL seems to the best format to access the rows quickly, + # without the numpy overhead of slicing CSR indices and data. + connectivity = connectivity.tolil() + # We are storing the graph in a list of IntFloatDict + for ind, (data, row) in enumerate(zip(connectivity.data, connectivity.rows)): + A[ind] = IntFloatDict( + np.asarray(row, dtype=np.intp), np.asarray(data, dtype=np.float64) + ) + # We keep only the upper triangular for the heap + # Generator expressions are faster than arrays on the following + inertia.extend( + _hierarchical.WeightedEdge(d, ind, r) for r, d in zip(row, data) if r < ind + ) + del connectivity + + heapify(inertia) + + # prepare the main fields + parent = np.arange(n_nodes, dtype=np.intp) + used_node = np.ones(n_nodes, dtype=np.intp) + children = [] + + # recursive merge loop + for k in range(n_samples, n_nodes): + # identify the merge + while True: + edge = heappop(inertia) + if used_node[edge.a] and used_node[edge.b]: + break + i = edge.a + j = edge.b + + if return_distance: + # store distances + distances[k - n_samples] = edge.weight + + parent[i] = parent[j] = k + children.append((i, j)) + # Keep track of the number of elements per cluster + n_i = used_node[i] + n_j = used_node[j] + used_node[k] = n_i + n_j + used_node[i] = used_node[j] = False + + # update the structure matrix A and the inertia matrix + # a clever 'min', or 'max' operation between A[i] and A[j] + coord_col = join_func(A[i], A[j], used_node, n_i, n_j) + for col, d in coord_col: + A[col].append(k, d) + # Here we use the information from coord_col (containing the + # distances) to update the heap + heappush(inertia, _hierarchical.WeightedEdge(d, k, col)) + A[k] = coord_col + # Clear A[i] and A[j] to save memory + A[i] = A[j] = 0 + + # Separate leaves in children (empty lists up to now) + n_leaves = n_samples + + # # return numpy array for efficient caching + children = np.array(children)[:, ::-1] + + if return_distance: + return children, n_connected_components, n_leaves, parent, distances + return children, n_connected_components, n_leaves, parent + + +# Matching names to tree-building strategies +def _complete_linkage(*args, **kwargs): + kwargs["linkage"] = "complete" + return linkage_tree(*args, **kwargs) + + +def _average_linkage(*args, **kwargs): + kwargs["linkage"] = "average" + return linkage_tree(*args, **kwargs) + + +def _single_linkage(*args, **kwargs): + kwargs["linkage"] = "single" + return linkage_tree(*args, **kwargs) + + +_TREE_BUILDERS = dict( + ward=ward_tree, + complete=_complete_linkage, + average=_average_linkage, + single=_single_linkage, +) + +############################################################################### +# Functions for cutting hierarchical clustering tree + + +def _hc_cut(n_clusters, children, n_leaves): + """Function cutting the ward tree for a given number of clusters. + + Parameters + ---------- + n_clusters : int or ndarray + The number of clusters to form. + + children : ndarray of shape (n_nodes-1, 2) + The children of each non-leaf node. Values less than `n_samples` + correspond to leaves of the tree which are the original samples. + A node `i` greater than or equal to `n_samples` is a non-leaf + node and has children `children_[i - n_samples]`. 
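The tree builders above (`ward_tree` and `linkage_tree`) are also exported in `__init__.py`; a brief sketch of the unstructured call path (no connectivity matrix), again assuming the vendored package is importable and reusing the toy data from the `ward_tree` doctest:

    import numpy as np
    from sklearn.cluster import linkage_tree, ward_tree

    X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]])

    # Unstructured Ward: the full tree is built via scipy.cluster.hierarchy,
    # so `parents` comes back as None.
    children, n_components, n_leaves, parents = ward_tree(X)

    # Average linkage through the generic builder; distances are returned
    # only when explicitly requested.
    children, n_components, n_leaves, parents, distances = linkage_tree(
        X, linkage="average", return_distance=True
    )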
Alternatively + at the i-th iteration, children[i][0] and children[i][1] + are merged to form node `n_samples + i`. + + n_leaves : int + Number of leaves of the tree. + + Returns + ------- + labels : array [n_samples] + Cluster labels for each point. + """ + if n_clusters > n_leaves: + raise ValueError( + "Cannot extract more clusters than samples: " + "%s clusters where given for a tree with %s leaves." + % (n_clusters, n_leaves) + ) + # In this function, we store nodes as a heap to avoid recomputing + # the max of the nodes: the first element is always the smallest + # We use negated indices as heaps work on smallest elements, and we + # are interested in largest elements + # children[-1] is the root of the tree + nodes = [-(max(children[-1]) + 1)] + for _ in range(n_clusters - 1): + # As we have a heap, nodes[0] is the smallest element + these_children = children[-nodes[0] - n_leaves] + # Insert the 2 children and remove the largest node + heappush(nodes, -these_children[0]) + heappushpop(nodes, -these_children[1]) + label = np.zeros(n_leaves, dtype=np.intp) + for i, node in enumerate(nodes): + label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i + return label + + +############################################################################### + + +class AgglomerativeClustering(ClusterMixin, BaseEstimator): + """ + Agglomerative Clustering. + + Recursively merges pair of clusters of sample data; uses linkage distance. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_clusters : int or None, default=2 + The number of clusters to find. It must be ``None`` if + ``distance_threshold`` is not ``None``. + + metric : str or callable, default="euclidean" + Metric used to compute the linkage. Can be "euclidean", "l1", "l2", + "manhattan", "cosine", or "precomputed". If linkage is "ward", only + "euclidean" is accepted. If "precomputed", a distance matrix is needed + as input for the fit method. + + .. versionadded:: 1.2 + + .. deprecated:: 1.4 + `metric=None` is deprecated in 1.4 and will be removed in 1.6. + Let `metric` be the default value (i.e. `"euclidean"`) instead. + + memory : str or object with the joblib.Memory interface, default=None + Used to cache the output of the computation of the tree. + By default, no caching is done. If a string is given, it is the + path to the caching directory. + + connectivity : array-like or callable, default=None + Connectivity matrix. Defines for each sample the neighboring + samples following a given structure of the data. + This can be a connectivity matrix itself or a callable that transforms + the data into a connectivity matrix, such as derived from + `kneighbors_graph`. Default is ``None``, i.e, the + hierarchical clustering algorithm is unstructured. + + compute_full_tree : 'auto' or bool, default='auto' + Stop early the construction of the tree at ``n_clusters``. This is + useful to decrease computation time if the number of clusters is not + small compared to the number of samples. This option is useful only + when specifying a connectivity matrix. Note also that when varying the + number of clusters and using caching, it may be advantageous to compute + the full tree. It must be ``True`` if ``distance_threshold`` is not + ``None``. By default `compute_full_tree` is "auto", which is equivalent + to `True` when `distance_threshold` is not `None` or that `n_clusters` + is inferior to the maximum between 100 or `0.02 * n_samples`. + Otherwise, "auto" is equivalent to `False`. 
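The `connectivity` parameter described above accepts a sparse graph such as the one produced by `kneighbors_graph`; a short sketch of that structured variant follows (the neighbourhood size and random data are illustrative only):

    import numpy as np
    from sklearn.cluster import AgglomerativeClustering
    from sklearn.neighbors import kneighbors_graph

    rng = np.random.RandomState(0)
    X = rng.rand(30, 2)

    # Restrict merges to each sample's 5 nearest neighbours; _fix_connectivity
    # above symmetrizes the graph and completes it if it is not fully connected.
    knn_graph = kneighbors_graph(X, n_neighbors=5, include_self=False)

    model = AgglomerativeClustering(n_clusters=3, connectivity=knn_graph)
    labels = model.fit_predict(X)   # ndarray of shape (30,) with values in {0, 1, 2}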
+ + linkage : {'ward', 'complete', 'average', 'single'}, default='ward' + Which linkage criterion to use. The linkage criterion determines which + distance to use between sets of observation. The algorithm will merge + the pairs of cluster that minimize this criterion. + + - 'ward' minimizes the variance of the clusters being merged. + - 'average' uses the average of the distances of each observation of + the two sets. + - 'complete' or 'maximum' linkage uses the maximum distances between + all observations of the two sets. + - 'single' uses the minimum of the distances between all observations + of the two sets. + + .. versionadded:: 0.20 + Added the 'single' option + + distance_threshold : float, default=None + The linkage distance threshold at or above which clusters will not be + merged. If not ``None``, ``n_clusters`` must be ``None`` and + ``compute_full_tree`` must be ``True``. + + .. versionadded:: 0.21 + + compute_distances : bool, default=False + Computes distances between clusters even if `distance_threshold` is not + used. This can be used to make dendrogram visualization, but introduces + a computational and memory overhead. + + .. versionadded:: 0.24 + + Attributes + ---------- + n_clusters_ : int + The number of clusters found by the algorithm. If + ``distance_threshold=None``, it will be equal to the given + ``n_clusters``. + + labels_ : ndarray of shape (n_samples) + Cluster labels for each point. + + n_leaves_ : int + Number of leaves in the hierarchical tree. + + n_connected_components_ : int + The estimated number of connected components in the graph. + + .. versionadded:: 0.21 + ``n_connected_components_`` was added to replace ``n_components_``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + children_ : array-like of shape (n_samples-1, 2) + The children of each non-leaf node. Values less than `n_samples` + correspond to leaves of the tree which are the original samples. + A node `i` greater than or equal to `n_samples` is a non-leaf + node and has children `children_[i - n_samples]`. Alternatively + at the i-th iteration, children[i][0] and children[i][1] + are merged to form node `n_samples + i`. + + distances_ : array-like of shape (n_nodes-1,) + Distances between nodes in the corresponding place in `children_`. + Only computed if `distance_threshold` is used or `compute_distances` + is set to `True`. + + See Also + -------- + FeatureAgglomeration : Agglomerative clustering but for features instead of + samples. + ward_tree : Hierarchical clustering with ward linkage. + + Examples + -------- + >>> from sklearn.cluster import AgglomerativeClustering + >>> import numpy as np + >>> X = np.array([[1, 2], [1, 4], [1, 0], + ... 
[4, 2], [4, 4], [4, 0]]) + >>> clustering = AgglomerativeClustering().fit(X) + >>> clustering + AgglomerativeClustering() + >>> clustering.labels_ + array([1, 1, 1, 0, 0, 0]) + """ + + _parameter_constraints: dict = { + "n_clusters": [Interval(Integral, 1, None, closed="left"), None], + "metric": [ + StrOptions(set(_VALID_METRICS) | {"precomputed"}), + callable, + Hidden(None), + ], + "memory": [str, HasMethods("cache"), None], + "connectivity": ["array-like", callable, None], + "compute_full_tree": [StrOptions({"auto"}), "boolean"], + "linkage": [StrOptions(set(_TREE_BUILDERS.keys()))], + "distance_threshold": [Interval(Real, 0, None, closed="left"), None], + "compute_distances": ["boolean"], + } + + def __init__( + self, + n_clusters=2, + *, + metric="euclidean", + memory=None, + connectivity=None, + compute_full_tree="auto", + linkage="ward", + distance_threshold=None, + compute_distances=False, + ): + self.n_clusters = n_clusters + self.distance_threshold = distance_threshold + self.memory = memory + self.connectivity = connectivity + self.compute_full_tree = compute_full_tree + self.linkage = linkage + self.metric = metric + self.compute_distances = compute_distances + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the hierarchical clustering from features, or distance matrix. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) or \ + (n_samples, n_samples) + Training instances to cluster, or distances between instances if + ``metric='precomputed'``. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Returns the fitted instance. + """ + X = self._validate_data(X, ensure_min_samples=2) + return self._fit(X) + + def _fit(self, X): + """Fit without validation + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples) + Training instances to cluster, or distances between instances if + ``affinity='precomputed'``. + + Returns + ------- + self : object + Returns the fitted instance. + """ + memory = check_memory(self.memory) + + # TODO(1.6): remove in 1.6 + if self.metric is None: + warnings.warn( + ( + "`metric=None` is deprecated in version 1.4 and will be removed in " + "version 1.6. Let `metric` be the default value " + "(i.e. `'euclidean'`) instead." + ), + FutureWarning, + ) + self._metric = "euclidean" + else: + self._metric = self.metric + + if not ((self.n_clusters is None) ^ (self.distance_threshold is None)): + raise ValueError( + "Exactly one of n_clusters and " + "distance_threshold has to be set, and the other " + "needs to be None." + ) + + if self.distance_threshold is not None and not self.compute_full_tree: + raise ValueError( + "compute_full_tree must be True if distance_threshold is set." + ) + + if self.linkage == "ward" and self._metric != "euclidean": + raise ValueError( + f"{self._metric} was provided as metric. Ward can only " + "work with euclidean distances." 
+ ) + + tree_builder = _TREE_BUILDERS[self.linkage] + + connectivity = self.connectivity + if self.connectivity is not None: + if callable(self.connectivity): + connectivity = self.connectivity(X) + connectivity = check_array( + connectivity, accept_sparse=["csr", "coo", "lil"] + ) + + n_samples = len(X) + compute_full_tree = self.compute_full_tree + if self.connectivity is None: + compute_full_tree = True + if compute_full_tree == "auto": + if self.distance_threshold is not None: + compute_full_tree = True + else: + # Early stopping is likely to give a speed up only for + # a large number of clusters. The actual threshold + # implemented here is heuristic + compute_full_tree = self.n_clusters < max(100, 0.02 * n_samples) + n_clusters = self.n_clusters + if compute_full_tree: + n_clusters = None + + # Construct the tree + kwargs = {} + if self.linkage != "ward": + kwargs["linkage"] = self.linkage + kwargs["affinity"] = self._metric + + distance_threshold = self.distance_threshold + + return_distance = (distance_threshold is not None) or self.compute_distances + + out = memory.cache(tree_builder)( + X, + connectivity=connectivity, + n_clusters=n_clusters, + return_distance=return_distance, + **kwargs, + ) + (self.children_, self.n_connected_components_, self.n_leaves_, parents) = out[ + :4 + ] + + if return_distance: + self.distances_ = out[-1] + + if self.distance_threshold is not None: # distance_threshold is used + self.n_clusters_ = ( + np.count_nonzero(self.distances_ >= distance_threshold) + 1 + ) + else: # n_clusters is used + self.n_clusters_ = self.n_clusters + + # Cut the tree + if compute_full_tree: + self.labels_ = _hc_cut(self.n_clusters_, self.children_, self.n_leaves_) + else: + labels = _hierarchical.hc_get_heads(parents, copy=False) + # copy to avoid holding a reference on the original array + labels = np.copy(labels[:n_samples]) + # Reassign cluster numbers + self.labels_ = np.searchsorted(np.unique(labels), labels) + return self + + def fit_predict(self, X, y=None): + """Fit and return the result of each sample's clustering assignment. + + In addition to fitting, this method also return the result of the + clustering assignment for each sample in the training set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or \ + (n_samples, n_samples) + Training instances to cluster, or distances between instances if + ``affinity='precomputed'``. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Cluster labels. + """ + return super().fit_predict(X, y) + + +class FeatureAgglomeration( + ClassNamePrefixFeaturesOutMixin, AgglomerativeClustering, AgglomerationTransform +): + """Agglomerate features. + + Recursively merges pair of clusters of features. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_clusters : int or None, default=2 + The number of clusters to find. It must be ``None`` if + ``distance_threshold`` is not ``None``. + + metric : str or callable, default="euclidean" + Metric used to compute the linkage. Can be "euclidean", "l1", "l2", + "manhattan", "cosine", or "precomputed". If linkage is "ward", only + "euclidean" is accepted. If "precomputed", a distance matrix is needed + as input for the fit method. + + .. versionadded:: 1.2 + + .. deprecated:: 1.4 + `metric=None` is deprecated in 1.4 and will be removed in 1.6. + Let `metric` be the default value (i.e. `"euclidean"`) instead. 
+ + memory : str or object with the joblib.Memory interface, default=None + Used to cache the output of the computation of the tree. + By default, no caching is done. If a string is given, it is the + path to the caching directory. + + connectivity : array-like or callable, default=None + Connectivity matrix. Defines for each feature the neighboring + features following a given structure of the data. + This can be a connectivity matrix itself or a callable that transforms + the data into a connectivity matrix, such as derived from + `kneighbors_graph`. Default is `None`, i.e, the + hierarchical clustering algorithm is unstructured. + + compute_full_tree : 'auto' or bool, default='auto' + Stop early the construction of the tree at `n_clusters`. This is useful + to decrease computation time if the number of clusters is not small + compared to the number of features. This option is useful only when + specifying a connectivity matrix. Note also that when varying the + number of clusters and using caching, it may be advantageous to compute + the full tree. It must be ``True`` if ``distance_threshold`` is not + ``None``. By default `compute_full_tree` is "auto", which is equivalent + to `True` when `distance_threshold` is not `None` or that `n_clusters` + is inferior to the maximum between 100 or `0.02 * n_samples`. + Otherwise, "auto" is equivalent to `False`. + + linkage : {"ward", "complete", "average", "single"}, default="ward" + Which linkage criterion to use. The linkage criterion determines which + distance to use between sets of features. The algorithm will merge + the pairs of cluster that minimize this criterion. + + - "ward" minimizes the variance of the clusters being merged. + - "complete" or maximum linkage uses the maximum distances between + all features of the two sets. + - "average" uses the average of the distances of each feature of + the two sets. + - "single" uses the minimum of the distances between all features + of the two sets. + + pooling_func : callable, default=np.mean + This combines the values of agglomerated features into a single + value, and should accept an array of shape [M, N] and the keyword + argument `axis=1`, and reduce it to an array of size [M]. + + distance_threshold : float, default=None + The linkage distance threshold at or above which clusters will not be + merged. If not ``None``, ``n_clusters`` must be ``None`` and + ``compute_full_tree`` must be ``True``. + + .. versionadded:: 0.21 + + compute_distances : bool, default=False + Computes distances between clusters even if `distance_threshold` is not + used. This can be used to make dendrogram visualization, but introduces + a computational and memory overhead. + + .. versionadded:: 0.24 + + Attributes + ---------- + n_clusters_ : int + The number of clusters found by the algorithm. If + ``distance_threshold=None``, it will be equal to the given + ``n_clusters``. + + labels_ : array-like of (n_features,) + Cluster labels for each feature. + + n_leaves_ : int + Number of leaves in the hierarchical tree. + + n_connected_components_ : int + The estimated number of connected components in the graph. + + .. versionadded:: 0.21 + ``n_connected_components_`` was added to replace ``n_components_``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. 
versionadded:: 1.0 + + children_ : array-like of shape (n_nodes-1, 2) + The children of each non-leaf node. Values less than `n_features` + correspond to leaves of the tree which are the original samples. + A node `i` greater than or equal to `n_features` is a non-leaf + node and has children `children_[i - n_features]`. Alternatively + at the i-th iteration, children[i][0] and children[i][1] + are merged to form node `n_features + i`. + + distances_ : array-like of shape (n_nodes-1,) + Distances between nodes in the corresponding place in `children_`. + Only computed if `distance_threshold` is used or `compute_distances` + is set to `True`. + + See Also + -------- + AgglomerativeClustering : Agglomerative clustering samples instead of + features. + ward_tree : Hierarchical clustering with ward linkage. + + Examples + -------- + >>> import numpy as np + >>> from sklearn import datasets, cluster + >>> digits = datasets.load_digits() + >>> images = digits.images + >>> X = np.reshape(images, (len(images), -1)) + >>> agglo = cluster.FeatureAgglomeration(n_clusters=32) + >>> agglo.fit(X) + FeatureAgglomeration(n_clusters=32) + >>> X_reduced = agglo.transform(X) + >>> X_reduced.shape + (1797, 32) + """ + + _parameter_constraints: dict = { + "n_clusters": [Interval(Integral, 1, None, closed="left"), None], + "metric": [ + StrOptions(set(_VALID_METRICS) | {"precomputed"}), + callable, + Hidden(None), + ], + "memory": [str, HasMethods("cache"), None], + "connectivity": ["array-like", callable, None], + "compute_full_tree": [StrOptions({"auto"}), "boolean"], + "linkage": [StrOptions(set(_TREE_BUILDERS.keys()))], + "pooling_func": [callable], + "distance_threshold": [Interval(Real, 0, None, closed="left"), None], + "compute_distances": ["boolean"], + } + + def __init__( + self, + n_clusters=2, + *, + metric="euclidean", + memory=None, + connectivity=None, + compute_full_tree="auto", + linkage="ward", + pooling_func=np.mean, + distance_threshold=None, + compute_distances=False, + ): + super().__init__( + n_clusters=n_clusters, + memory=memory, + connectivity=connectivity, + compute_full_tree=compute_full_tree, + linkage=linkage, + metric=metric, + distance_threshold=distance_threshold, + compute_distances=compute_distances, + ) + self.pooling_func = pooling_func + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the hierarchical clustering on the data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Returns the transformer. 
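+
+        Notes
+        -----
+        Internally the data matrix is transposed before agglomeration, so the
+        features of `X` play the role that samples play in
+        :class:`AgglomerativeClustering`.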
+ """ + X = self._validate_data(X, ensure_min_features=2) + super()._fit(X.T) + self._n_features_out = self.n_clusters_ + return self + + @property + def fit_predict(self): + """Fit and return the result of each sample's clustering assignment.""" + raise AttributeError diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_bicluster.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_bicluster.py new file mode 100644 index 0000000000000000000000000000000000000000..65280c06319d99a48f727fc3c3267def7cdc740a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_bicluster.py @@ -0,0 +1,622 @@ +"""Spectral biclustering algorithms.""" +# Authors : Kemal Eren +# License: BSD 3 clause + +from abc import ABCMeta, abstractmethod +from numbers import Integral + +import numpy as np +from scipy.linalg import norm +from scipy.sparse import dia_matrix, issparse +from scipy.sparse.linalg import eigsh, svds + +from ..base import BaseEstimator, BiclusterMixin, _fit_context +from ..utils import check_random_state, check_scalar +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import make_nonnegative, randomized_svd, safe_sparse_dot +from ..utils.validation import assert_all_finite +from ._kmeans import KMeans, MiniBatchKMeans + +__all__ = ["SpectralCoclustering", "SpectralBiclustering"] + + +def _scale_normalize(X): + """Normalize ``X`` by scaling rows and columns independently. + + Returns the normalized matrix and the row and column scaling + factors. + """ + X = make_nonnegative(X) + row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze() + col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze() + row_diag = np.where(np.isnan(row_diag), 0, row_diag) + col_diag = np.where(np.isnan(col_diag), 0, col_diag) + if issparse(X): + n_rows, n_cols = X.shape + r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows)) + c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols)) + an = r * X * c + else: + an = row_diag[:, np.newaxis] * X * col_diag + return an, row_diag, col_diag + + +def _bistochastic_normalize(X, max_iter=1000, tol=1e-5): + """Normalize rows and columns of ``X`` simultaneously so that all + rows sum to one constant and all columns sum to a different + constant. + """ + # According to paper, this can also be done more efficiently with + # deviation reduction and balancing algorithms. + X = make_nonnegative(X) + X_scaled = X + for _ in range(max_iter): + X_new, _, _ = _scale_normalize(X_scaled) + if issparse(X): + dist = norm(X_scaled.data - X.data) + else: + dist = norm(X_scaled - X_new) + X_scaled = X_new + if dist is not None and dist < tol: + break + return X_scaled + + +def _log_normalize(X): + """Normalize ``X`` according to Kluger's log-interactions scheme.""" + X = make_nonnegative(X, min_value=1) + if issparse(X): + raise ValueError( + "Cannot compute log of a sparse matrix," + " because log(x) diverges to -infinity as x" + " goes to 0." 
+ ) + L = np.log(X) + row_avg = L.mean(axis=1)[:, np.newaxis] + col_avg = L.mean(axis=0) + avg = L.mean() + return L - row_avg - col_avg + avg + + +class BaseSpectral(BiclusterMixin, BaseEstimator, metaclass=ABCMeta): + """Base class for spectral biclustering.""" + + _parameter_constraints: dict = { + "svd_method": [StrOptions({"randomized", "arpack"})], + "n_svd_vecs": [Interval(Integral, 0, None, closed="left"), None], + "mini_batch": ["boolean"], + "init": [StrOptions({"k-means++", "random"}), np.ndarray], + "n_init": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + } + + @abstractmethod + def __init__( + self, + n_clusters=3, + svd_method="randomized", + n_svd_vecs=None, + mini_batch=False, + init="k-means++", + n_init=10, + random_state=None, + ): + self.n_clusters = n_clusters + self.svd_method = svd_method + self.n_svd_vecs = n_svd_vecs + self.mini_batch = mini_batch + self.init = init + self.n_init = n_init + self.random_state = random_state + + @abstractmethod + def _check_parameters(self, n_samples): + """Validate parameters depending on the input data.""" + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Create a biclustering for X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + SpectralBiclustering instance. + """ + X = self._validate_data(X, accept_sparse="csr", dtype=np.float64) + self._check_parameters(X.shape[0]) + self._fit(X) + return self + + def _svd(self, array, n_components, n_discard): + """Returns first `n_components` left and right singular + vectors u and v, discarding the first `n_discard`. + """ + if self.svd_method == "randomized": + kwargs = {} + if self.n_svd_vecs is not None: + kwargs["n_oversamples"] = self.n_svd_vecs + u, _, vt = randomized_svd( + array, n_components, random_state=self.random_state, **kwargs + ) + + elif self.svd_method == "arpack": + u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs) + if np.any(np.isnan(vt)): + # some eigenvalues of A * A.T are negative, causing + # sqrt() to be np.nan. This causes some vectors in vt + # to be np.nan. 
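+                # Recompute the affected singular vectors via a symmetric
+                # eigendecomposition instead: the right singular vectors of
+                # `array` are the eigenvectors of the Gram matrix
+                # array.T @ array, and eigsh avoids the square roots of
+                # slightly negative eigenvalues that made svds return NaNs.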
+ A = safe_sparse_dot(array.T, array) + random_state = check_random_state(self.random_state) + # initialize with [-1,1] as in ARPACK + v0 = random_state.uniform(-1, 1, A.shape[0]) + _, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0) + vt = v.T + if np.any(np.isnan(u)): + A = safe_sparse_dot(array, array.T) + random_state = check_random_state(self.random_state) + # initialize with [-1,1] as in ARPACK + v0 = random_state.uniform(-1, 1, A.shape[0]) + _, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0) + + assert_all_finite(u) + assert_all_finite(vt) + u = u[:, n_discard:] + vt = vt[n_discard:] + return u, vt.T + + def _k_means(self, data, n_clusters): + if self.mini_batch: + model = MiniBatchKMeans( + n_clusters, + init=self.init, + n_init=self.n_init, + random_state=self.random_state, + ) + else: + model = KMeans( + n_clusters, + init=self.init, + n_init=self.n_init, + random_state=self.random_state, + ) + model.fit(data) + centroid = model.cluster_centers_ + labels = model.labels_ + return centroid, labels + + def _more_tags(self): + return { + "_xfail_checks": { + "check_estimators_dtypes": "raises nan error", + "check_fit2d_1sample": "_scale_normalize fails", + "check_fit2d_1feature": "raises apply_along_axis error", + "check_estimator_sparse_data": "does not fail gracefully", + "check_methods_subset_invariance": "empty array passed inside", + "check_dont_overwrite_parameters": "empty array passed inside", + "check_fit2d_predict1d": "empty array passed inside", + } + } + + +class SpectralCoclustering(BaseSpectral): + """Spectral Co-Clustering algorithm (Dhillon, 2001). + + Clusters rows and columns of an array `X` to solve the relaxed + normalized cut of the bipartite graph created from `X` as follows: + the edge between row vertex `i` and column vertex `j` has weight + `X[i, j]`. + + The resulting bicluster structure is block-diagonal, since each + row and each column belongs to exactly one bicluster. + + Supports sparse matrices, as long as they are nonnegative. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_clusters : int, default=3 + The number of biclusters to find. + + svd_method : {'randomized', 'arpack'}, default='randomized' + Selects the algorithm for finding singular vectors. May be + 'randomized' or 'arpack'. If 'randomized', use + :func:`sklearn.utils.extmath.randomized_svd`, which may be faster + for large matrices. If 'arpack', use + :func:`scipy.sparse.linalg.svds`, which is more accurate, but + possibly slower in some cases. + + n_svd_vecs : int, default=None + Number of vectors to use in calculating the SVD. Corresponds + to `ncv` when `svd_method=arpack` and `n_oversamples` when + `svd_method` is 'randomized`. + + mini_batch : bool, default=False + Whether to use mini-batch k-means, which is faster but may get + different results. + + init : {'k-means++', 'random'}, or ndarray of shape \ + (n_clusters, n_features), default='k-means++' + Method for initialization of k-means algorithm; defaults to + 'k-means++'. + + n_init : int, default=10 + Number of random initializations that are tried with the + k-means algorithm. + + If mini-batch k-means is used, the best initialization is + chosen and the algorithm runs once. Otherwise, the algorithm + is run for each initialization and the best solution chosen. + + random_state : int, RandomState instance, default=None + Used for randomizing the singular value decomposition and the k-means + initialization. Use an int to make the randomness deterministic. + See :term:`Glossary `. 
+ + Attributes + ---------- + rows_ : array-like of shape (n_row_clusters, n_rows) + Results of the clustering. `rows[i, r]` is True if + cluster `i` contains row `r`. Available only after calling ``fit``. + + columns_ : array-like of shape (n_column_clusters, n_columns) + Results of the clustering, like `rows`. + + row_labels_ : array-like of shape (n_rows,) + The bicluster label of each row. + + column_labels_ : array-like of shape (n_cols,) + The bicluster label of each column. + + biclusters_ : tuple of two ndarrays + The tuple contains the `rows_` and `columns_` arrays. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + SpectralBiclustering : Partitions rows and columns under the assumption + that the data has an underlying checkerboard structure. + + References + ---------- + * :doi:`Dhillon, Inderjit S, 2001. Co-clustering documents and words using + bipartite spectral graph partitioning. + <10.1145/502512.502550>` + + Examples + -------- + >>> from sklearn.cluster import SpectralCoclustering + >>> import numpy as np + >>> X = np.array([[1, 1], [2, 1], [1, 0], + ... [4, 7], [3, 5], [3, 6]]) + >>> clustering = SpectralCoclustering(n_clusters=2, random_state=0).fit(X) + >>> clustering.row_labels_ #doctest: +SKIP + array([0, 1, 1, 0, 0, 0], dtype=int32) + >>> clustering.column_labels_ #doctest: +SKIP + array([0, 0], dtype=int32) + >>> clustering + SpectralCoclustering(n_clusters=2, random_state=0) + """ + + _parameter_constraints: dict = { + **BaseSpectral._parameter_constraints, + "n_clusters": [Interval(Integral, 1, None, closed="left")], + } + + def __init__( + self, + n_clusters=3, + *, + svd_method="randomized", + n_svd_vecs=None, + mini_batch=False, + init="k-means++", + n_init=10, + random_state=None, + ): + super().__init__( + n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state + ) + + def _check_parameters(self, n_samples): + if self.n_clusters > n_samples: + raise ValueError( + f"n_clusters should be <= n_samples={n_samples}. Got" + f" {self.n_clusters} instead." + ) + + def _fit(self, X): + normalized_data, row_diag, col_diag = _scale_normalize(X) + n_sv = 1 + int(np.ceil(np.log2(self.n_clusters))) + u, v = self._svd(normalized_data, n_sv, n_discard=1) + z = np.vstack((row_diag[:, np.newaxis] * u, col_diag[:, np.newaxis] * v)) + + _, labels = self._k_means(z, self.n_clusters) + + n_rows = X.shape[0] + self.row_labels_ = labels[:n_rows] + self.column_labels_ = labels[n_rows:] + + self.rows_ = np.vstack([self.row_labels_ == c for c in range(self.n_clusters)]) + self.columns_ = np.vstack( + [self.column_labels_ == c for c in range(self.n_clusters)] + ) + + +class SpectralBiclustering(BaseSpectral): + """Spectral biclustering (Kluger, 2003). + + Partitions rows and columns under the assumption that the data has + an underlying checkerboard structure. For instance, if there are + two row partitions and three column partitions, each row will + belong to three biclusters, and each column will belong to two + biclusters. The outer product of the corresponding row and column + label vectors gives this checkerboard structure. + + Read more in the :ref:`User Guide `. 
+ + Parameters + ---------- + n_clusters : int or tuple (n_row_clusters, n_column_clusters), default=3 + The number of row and column clusters in the checkerboard + structure. + + method : {'bistochastic', 'scale', 'log'}, default='bistochastic' + Method of normalizing and converting singular vectors into + biclusters. May be one of 'scale', 'bistochastic', or 'log'. + The authors recommend using 'log'. If the data is sparse, + however, log normalization will not work, which is why the + default is 'bistochastic'. + + .. warning:: + if `method='log'`, the data must not be sparse. + + n_components : int, default=6 + Number of singular vectors to check. + + n_best : int, default=3 + Number of best singular vectors to which to project the data + for clustering. + + svd_method : {'randomized', 'arpack'}, default='randomized' + Selects the algorithm for finding singular vectors. May be + 'randomized' or 'arpack'. If 'randomized', uses + :func:`~sklearn.utils.extmath.randomized_svd`, which may be faster + for large matrices. If 'arpack', uses + `scipy.sparse.linalg.svds`, which is more accurate, but + possibly slower in some cases. + + n_svd_vecs : int, default=None + Number of vectors to use in calculating the SVD. Corresponds + to `ncv` when `svd_method=arpack` and `n_oversamples` when + `svd_method` is 'randomized`. + + mini_batch : bool, default=False + Whether to use mini-batch k-means, which is faster but may get + different results. + + init : {'k-means++', 'random'} or ndarray of shape (n_clusters, n_features), \ + default='k-means++' + Method for initialization of k-means algorithm; defaults to + 'k-means++'. + + n_init : int, default=10 + Number of random initializations that are tried with the + k-means algorithm. + + If mini-batch k-means is used, the best initialization is + chosen and the algorithm runs once. Otherwise, the algorithm + is run for each initialization and the best solution chosen. + + random_state : int, RandomState instance, default=None + Used for randomizing the singular value decomposition and the k-means + initialization. Use an int to make the randomness deterministic. + See :term:`Glossary `. + + Attributes + ---------- + rows_ : array-like of shape (n_row_clusters, n_rows) + Results of the clustering. `rows[i, r]` is True if + cluster `i` contains row `r`. Available only after calling ``fit``. + + columns_ : array-like of shape (n_column_clusters, n_columns) + Results of the clustering, like `rows`. + + row_labels_ : array-like of shape (n_rows,) + Row partition labels. + + column_labels_ : array-like of shape (n_cols,) + Column partition labels. + + biclusters_ : tuple of two ndarrays + The tuple contains the `rows_` and `columns_` arrays. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + SpectralCoclustering : Spectral Co-Clustering algorithm (Dhillon, 2001). + + References + ---------- + + * :doi:`Kluger, Yuval, et. al., 2003. Spectral biclustering of microarray + data: coclustering genes and conditions. + <10.1101/gr.648603>` + + Examples + -------- + >>> from sklearn.cluster import SpectralBiclustering + >>> import numpy as np + >>> X = np.array([[1, 1], [2, 1], [1, 0], + ... 
[4, 7], [3, 5], [3, 6]]) + >>> clustering = SpectralBiclustering(n_clusters=2, random_state=0).fit(X) + >>> clustering.row_labels_ + array([1, 1, 1, 0, 0, 0], dtype=int32) + >>> clustering.column_labels_ + array([1, 0], dtype=int32) + >>> clustering + SpectralBiclustering(n_clusters=2, random_state=0) + """ + + _parameter_constraints: dict = { + **BaseSpectral._parameter_constraints, + "n_clusters": [Interval(Integral, 1, None, closed="left"), tuple], + "method": [StrOptions({"bistochastic", "scale", "log"})], + "n_components": [Interval(Integral, 1, None, closed="left")], + "n_best": [Interval(Integral, 1, None, closed="left")], + } + + def __init__( + self, + n_clusters=3, + *, + method="bistochastic", + n_components=6, + n_best=3, + svd_method="randomized", + n_svd_vecs=None, + mini_batch=False, + init="k-means++", + n_init=10, + random_state=None, + ): + super().__init__( + n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state + ) + self.method = method + self.n_components = n_components + self.n_best = n_best + + def _check_parameters(self, n_samples): + if isinstance(self.n_clusters, Integral): + if self.n_clusters > n_samples: + raise ValueError( + f"n_clusters should be <= n_samples={n_samples}. Got" + f" {self.n_clusters} instead." + ) + else: # tuple + try: + n_row_clusters, n_column_clusters = self.n_clusters + check_scalar( + n_row_clusters, + "n_row_clusters", + target_type=Integral, + min_val=1, + max_val=n_samples, + ) + check_scalar( + n_column_clusters, + "n_column_clusters", + target_type=Integral, + min_val=1, + max_val=n_samples, + ) + except (ValueError, TypeError) as e: + raise ValueError( + "Incorrect parameter n_clusters has value:" + f" {self.n_clusters}. It should either be a single integer" + " or an iterable with two integers:" + " (n_row_clusters, n_column_clusters)" + " And the values are should be in the" + " range: (1, n_samples)" + ) from e + + if self.n_best > self.n_components: + raise ValueError( + f"n_best={self.n_best} must be <= n_components={self.n_components}." + ) + + def _fit(self, X): + n_sv = self.n_components + if self.method == "bistochastic": + normalized_data = _bistochastic_normalize(X) + n_sv += 1 + elif self.method == "scale": + normalized_data, _, _ = _scale_normalize(X) + n_sv += 1 + elif self.method == "log": + normalized_data = _log_normalize(X) + n_discard = 0 if self.method == "log" else 1 + u, v = self._svd(normalized_data, n_sv, n_discard) + ut = u.T + vt = v.T + + try: + n_row_clusters, n_col_clusters = self.n_clusters + except TypeError: + n_row_clusters = n_col_clusters = self.n_clusters + + best_ut = self._fit_best_piecewise(ut, self.n_best, n_row_clusters) + + best_vt = self._fit_best_piecewise(vt, self.n_best, n_col_clusters) + + self.row_labels_ = self._project_and_cluster(X, best_vt.T, n_row_clusters) + + self.column_labels_ = self._project_and_cluster(X.T, best_ut.T, n_col_clusters) + + self.rows_ = np.vstack( + [ + self.row_labels_ == label + for label in range(n_row_clusters) + for _ in range(n_col_clusters) + ] + ) + self.columns_ = np.vstack( + [ + self.column_labels_ == label + for _ in range(n_row_clusters) + for label in range(n_col_clusters) + ] + ) + + def _fit_best_piecewise(self, vectors, n_best, n_clusters): + """Find the ``n_best`` vectors that are best approximated by piecewise + constant vectors. + + The piecewise vectors are found by k-means; the best is chosen + according to Euclidean distance. 
+ + """ + + def make_piecewise(v): + centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters) + return centroid[labels].ravel() + + piecewise_vectors = np.apply_along_axis(make_piecewise, axis=1, arr=vectors) + dists = np.apply_along_axis(norm, axis=1, arr=(vectors - piecewise_vectors)) + result = vectors[np.argsort(dists)[:n_best]] + return result + + def _project_and_cluster(self, data, vectors, n_clusters): + """Project ``data`` to ``vectors`` and cluster the result.""" + projected = safe_sparse_dot(data, vectors) + _, labels = self._k_means(projected, n_clusters) + return labels diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_birch.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_birch.py new file mode 100644 index 0000000000000000000000000000000000000000..d62fb880ba8b2633dfb3ee70cc7ad4dc3203eeb3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_birch.py @@ -0,0 +1,741 @@ +# Authors: Manoj Kumar +# Alexandre Gramfort +# Joel Nothman +# License: BSD 3 clause + +import warnings +from math import sqrt +from numbers import Integral, Real + +import numpy as np +from scipy import sparse + +from .._config import config_context +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + ClusterMixin, + TransformerMixin, + _fit_context, +) +from ..exceptions import ConvergenceWarning +from ..metrics import pairwise_distances_argmin +from ..metrics.pairwise import euclidean_distances +from ..utils._param_validation import Interval +from ..utils.extmath import row_norms +from ..utils.validation import check_is_fitted +from . import AgglomerativeClustering + + +def _iterate_sparse_X(X): + """This little hack returns a densified row when iterating over a sparse + matrix, instead of constructing a sparse matrix for every row that is + expensive. + """ + n_samples = X.shape[0] + X_indices = X.indices + X_data = X.data + X_indptr = X.indptr + + for i in range(n_samples): + row = np.zeros(X.shape[1]) + startptr, endptr = X_indptr[i], X_indptr[i + 1] + nonzero_indices = X_indices[startptr:endptr] + row[nonzero_indices] = X_data[startptr:endptr] + yield row + + +def _split_node(node, threshold, branching_factor): + """The node has to be split if there is no place for a new subcluster + in the node. + 1. Two empty nodes and two empty subclusters are initialized. + 2. The pair of distant subclusters are found. + 3. The properties of the empty subclusters and nodes are updated + according to the nearest distance between the subclusters to the + pair of distant subclusters. + 4. The two nodes are set as children to the two subclusters. 
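+
+    Returns
+    -------
+    new_subcluster1, new_subcluster2 : _CFSubcluster
+        The two subclusters produced by the split; the ``child_`` attribute
+        of each one points to one of the two new nodes.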
+ """ + new_subcluster1 = _CFSubcluster() + new_subcluster2 = _CFSubcluster() + new_node1 = _CFNode( + threshold=threshold, + branching_factor=branching_factor, + is_leaf=node.is_leaf, + n_features=node.n_features, + dtype=node.init_centroids_.dtype, + ) + new_node2 = _CFNode( + threshold=threshold, + branching_factor=branching_factor, + is_leaf=node.is_leaf, + n_features=node.n_features, + dtype=node.init_centroids_.dtype, + ) + new_subcluster1.child_ = new_node1 + new_subcluster2.child_ = new_node2 + + if node.is_leaf: + if node.prev_leaf_ is not None: + node.prev_leaf_.next_leaf_ = new_node1 + new_node1.prev_leaf_ = node.prev_leaf_ + new_node1.next_leaf_ = new_node2 + new_node2.prev_leaf_ = new_node1 + new_node2.next_leaf_ = node.next_leaf_ + if node.next_leaf_ is not None: + node.next_leaf_.prev_leaf_ = new_node2 + + dist = euclidean_distances( + node.centroids_, Y_norm_squared=node.squared_norm_, squared=True + ) + n_clusters = dist.shape[0] + + farthest_idx = np.unravel_index(dist.argmax(), (n_clusters, n_clusters)) + node1_dist, node2_dist = dist[(farthest_idx,)] + + node1_closer = node1_dist < node2_dist + # make sure node1 is closest to itself even if all distances are equal. + # This can only happen when all node.centroids_ are duplicates leading to all + # distances between centroids being zero. + node1_closer[farthest_idx[0]] = True + + for idx, subcluster in enumerate(node.subclusters_): + if node1_closer[idx]: + new_node1.append_subcluster(subcluster) + new_subcluster1.update(subcluster) + else: + new_node2.append_subcluster(subcluster) + new_subcluster2.update(subcluster) + return new_subcluster1, new_subcluster2 + + +class _CFNode: + """Each node in a CFTree is called a CFNode. + + The CFNode can have a maximum of branching_factor + number of CFSubclusters. + + Parameters + ---------- + threshold : float + Threshold needed for a new subcluster to enter a CFSubcluster. + + branching_factor : int + Maximum number of CF subclusters in each node. + + is_leaf : bool + We need to know if the CFNode is a leaf or not, in order to + retrieve the final subclusters. + + n_features : int + The number of features. + + Attributes + ---------- + subclusters_ : list + List of subclusters for a particular CFNode. + + prev_leaf_ : _CFNode + Useful only if is_leaf is True. + + next_leaf_ : _CFNode + next_leaf. Useful only if is_leaf is True. + the final subclusters. + + init_centroids_ : ndarray of shape (branching_factor + 1, n_features) + Manipulate ``init_centroids_`` throughout rather than centroids_ since + the centroids are just a view of the ``init_centroids_`` . + + init_sq_norm_ : ndarray of shape (branching_factor + 1,) + manipulate init_sq_norm_ throughout. similar to ``init_centroids_``. + + centroids_ : ndarray of shape (branching_factor + 1, n_features) + View of ``init_centroids_``. + + squared_norm_ : ndarray of shape (branching_factor + 1,) + View of ``init_sq_norm_``. + + """ + + def __init__(self, *, threshold, branching_factor, is_leaf, n_features, dtype): + self.threshold = threshold + self.branching_factor = branching_factor + self.is_leaf = is_leaf + self.n_features = n_features + + # The list of subclusters, centroids and squared norms + # to manipulate throughout. 
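+        # init_centroids_ and init_sq_norm_ are preallocated with
+        # branching_factor + 1 rows so that a node can temporarily hold one
+        # subcluster too many; exceeding branching_factor is what signals
+        # the caller to split this node.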
+ self.subclusters_ = [] + self.init_centroids_ = np.zeros((branching_factor + 1, n_features), dtype=dtype) + self.init_sq_norm_ = np.zeros((branching_factor + 1), dtype) + self.squared_norm_ = [] + self.prev_leaf_ = None + self.next_leaf_ = None + + def append_subcluster(self, subcluster): + n_samples = len(self.subclusters_) + self.subclusters_.append(subcluster) + self.init_centroids_[n_samples] = subcluster.centroid_ + self.init_sq_norm_[n_samples] = subcluster.sq_norm_ + + # Keep centroids and squared norm as views. In this way + # if we change init_centroids and init_sq_norm_, it is + # sufficient, + self.centroids_ = self.init_centroids_[: n_samples + 1, :] + self.squared_norm_ = self.init_sq_norm_[: n_samples + 1] + + def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2): + """Remove a subcluster from a node and update it with the + split subclusters. + """ + ind = self.subclusters_.index(subcluster) + self.subclusters_[ind] = new_subcluster1 + self.init_centroids_[ind] = new_subcluster1.centroid_ + self.init_sq_norm_[ind] = new_subcluster1.sq_norm_ + self.append_subcluster(new_subcluster2) + + def insert_cf_subcluster(self, subcluster): + """Insert a new subcluster into the node.""" + if not self.subclusters_: + self.append_subcluster(subcluster) + return False + + threshold = self.threshold + branching_factor = self.branching_factor + # We need to find the closest subcluster among all the + # subclusters so that we can insert our new subcluster. + dist_matrix = np.dot(self.centroids_, subcluster.centroid_) + dist_matrix *= -2.0 + dist_matrix += self.squared_norm_ + closest_index = np.argmin(dist_matrix) + closest_subcluster = self.subclusters_[closest_index] + + # If the subcluster has a child, we need a recursive strategy. + if closest_subcluster.child_ is not None: + split_child = closest_subcluster.child_.insert_cf_subcluster(subcluster) + + if not split_child: + # If it is determined that the child need not be split, we + # can just update the closest_subcluster + closest_subcluster.update(subcluster) + self.init_centroids_[closest_index] = self.subclusters_[ + closest_index + ].centroid_ + self.init_sq_norm_[closest_index] = self.subclusters_[ + closest_index + ].sq_norm_ + return False + + # things not too good. we need to redistribute the subclusters in + # our child node, and add a new subcluster in the parent + # subcluster to accommodate the new child. + else: + new_subcluster1, new_subcluster2 = _split_node( + closest_subcluster.child_, + threshold, + branching_factor, + ) + self.update_split_subclusters( + closest_subcluster, new_subcluster1, new_subcluster2 + ) + + if len(self.subclusters_) > self.branching_factor: + return True + return False + + # good to go! + else: + merged = closest_subcluster.merge_subcluster(subcluster, self.threshold) + if merged: + self.init_centroids_[closest_index] = closest_subcluster.centroid_ + self.init_sq_norm_[closest_index] = closest_subcluster.sq_norm_ + return False + + # not close to any other subclusters, and we still + # have space, so add. + elif len(self.subclusters_) < self.branching_factor: + self.append_subcluster(subcluster) + return False + + # We do not have enough space nor is it closer to an + # other subcluster. We need to split. + else: + self.append_subcluster(subcluster) + return True + + +class _CFSubcluster: + """Each subcluster in a CFNode is called a CFSubcluster. + + A CFSubcluster can have a CFNode has its child. 
+ + Parameters + ---------- + linear_sum : ndarray of shape (n_features,), default=None + Sample. This is kept optional to allow initialization of empty + subclusters. + + Attributes + ---------- + n_samples_ : int + Number of samples that belong to each subcluster. + + linear_sum_ : ndarray + Linear sum of all the samples in a subcluster. Prevents holding + all sample data in memory. + + squared_sum_ : float + Sum of the squared l2 norms of all samples belonging to a subcluster. + + centroid_ : ndarray of shape (branching_factor + 1, n_features) + Centroid of the subcluster. Prevent recomputing of centroids when + ``CFNode.centroids_`` is called. + + child_ : _CFNode + Child Node of the subcluster. Once a given _CFNode is set as the child + of the _CFNode, it is set to ``self.child_``. + + sq_norm_ : ndarray of shape (branching_factor + 1,) + Squared norm of the subcluster. Used to prevent recomputing when + pairwise minimum distances are computed. + """ + + def __init__(self, *, linear_sum=None): + if linear_sum is None: + self.n_samples_ = 0 + self.squared_sum_ = 0.0 + self.centroid_ = self.linear_sum_ = 0 + else: + self.n_samples_ = 1 + self.centroid_ = self.linear_sum_ = linear_sum + self.squared_sum_ = self.sq_norm_ = np.dot( + self.linear_sum_, self.linear_sum_ + ) + self.child_ = None + + def update(self, subcluster): + self.n_samples_ += subcluster.n_samples_ + self.linear_sum_ += subcluster.linear_sum_ + self.squared_sum_ += subcluster.squared_sum_ + self.centroid_ = self.linear_sum_ / self.n_samples_ + self.sq_norm_ = np.dot(self.centroid_, self.centroid_) + + def merge_subcluster(self, nominee_cluster, threshold): + """Check if a cluster is worthy enough to be merged. If + yes then merge. + """ + new_ss = self.squared_sum_ + nominee_cluster.squared_sum_ + new_ls = self.linear_sum_ + nominee_cluster.linear_sum_ + new_n = self.n_samples_ + nominee_cluster.n_samples_ + new_centroid = (1 / new_n) * new_ls + new_sq_norm = np.dot(new_centroid, new_centroid) + + # The squared radius of the cluster is defined: + # r^2 = sum_i ||x_i - c||^2 / n + # with x_i the n points assigned to the cluster and c its centroid: + # c = sum_i x_i / n + # This can be expanded to: + # r^2 = sum_i ||x_i||^2 / n - 2 < sum_i x_i / n, c> + n ||c||^2 / n + # and therefore simplifies to: + # r^2 = sum_i ||x_i||^2 / n - ||c||^2 + sq_radius = new_ss / new_n - new_sq_norm + + if sq_radius <= threshold**2: + ( + self.n_samples_, + self.linear_sum_, + self.squared_sum_, + self.centroid_, + self.sq_norm_, + ) = (new_n, new_ls, new_ss, new_centroid, new_sq_norm) + return True + return False + + @property + def radius(self): + """Return radius of the subcluster""" + # Because of numerical issues, this could become negative + sq_radius = self.squared_sum_ / self.n_samples_ - self.sq_norm_ + return sqrt(max(0, sq_radius)) + + +class Birch( + ClassNamePrefixFeaturesOutMixin, ClusterMixin, TransformerMixin, BaseEstimator +): + """Implements the BIRCH clustering algorithm. + + It is a memory-efficient, online-learning algorithm provided as an + alternative to :class:`MiniBatchKMeans`. It constructs a tree + data structure with the cluster centroids being read off the leaf. + These can be either the final cluster centroids or can be provided as input + to another clustering algorithm such as :class:`AgglomerativeClustering`. + + Read more in the :ref:`User Guide `. + + .. 
versionadded:: 0.16 + + Parameters + ---------- + threshold : float, default=0.5 + The radius of the subcluster obtained by merging a new sample and the + closest subcluster should be lesser than the threshold. Otherwise a new + subcluster is started. Setting this value to be very low promotes + splitting and vice-versa. + + branching_factor : int, default=50 + Maximum number of CF subclusters in each node. If a new samples enters + such that the number of subclusters exceed the branching_factor then + that node is split into two nodes with the subclusters redistributed + in each. The parent subcluster of that node is removed and two new + subclusters are added as parents of the 2 split nodes. + + n_clusters : int, instance of sklearn.cluster model or None, default=3 + Number of clusters after the final clustering step, which treats the + subclusters from the leaves as new samples. + + - `None` : the final clustering step is not performed and the + subclusters are returned as they are. + + - :mod:`sklearn.cluster` Estimator : If a model is provided, the model + is fit treating the subclusters as new samples and the initial data + is mapped to the label of the closest subcluster. + + - `int` : the model fit is :class:`AgglomerativeClustering` with + `n_clusters` set to be equal to the int. + + compute_labels : bool, default=True + Whether or not to compute labels for each fit. + + copy : bool, default=True + Whether or not to make a copy of the given data. If set to False, + the initial data will be overwritten. + + Attributes + ---------- + root_ : _CFNode + Root of the CFTree. + + dummy_leaf_ : _CFNode + Start pointer to all the leaves. + + subcluster_centers_ : ndarray + Centroids of all subclusters read directly from the leaves. + + subcluster_labels_ : ndarray + Labels assigned to the centroids of the subclusters after + they are clustered globally. + + labels_ : ndarray of shape (n_samples,) + Array of labels assigned to the input data. + if partial_fit is used instead of fit, they are assigned to the + last batch of data. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + MiniBatchKMeans : Alternative implementation that does incremental updates + of the centers' positions using mini-batches. + + Notes + ----- + The tree data structure consists of nodes with each node consisting of + a number of subclusters. The maximum number of subclusters in a node + is determined by the branching factor. Each subcluster maintains a + linear sum, squared sum and the number of samples in that subcluster. + In addition, each subcluster can also have a node as its child, if the + subcluster is not a member of a leaf node. + + For a new point entering the root, it is merged with the subcluster closest + to it and the linear sum, squared sum and the number of samples of that + subcluster are updated. This is done recursively till the properties of + the leaf node are updated. + + References + ---------- + * Tian Zhang, Raghu Ramakrishnan, Maron Livny + BIRCH: An efficient data clustering method for large databases. 
+ https://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf + + * Roberto Perdisci + JBirch - Java implementation of BIRCH clustering algorithm + https://code.google.com/archive/p/jbirch + + Examples + -------- + >>> from sklearn.cluster import Birch + >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]] + >>> brc = Birch(n_clusters=None) + >>> brc.fit(X) + Birch(n_clusters=None) + >>> brc.predict(X) + array([0, 0, 0, 1, 1, 1]) + """ + + _parameter_constraints: dict = { + "threshold": [Interval(Real, 0.0, None, closed="neither")], + "branching_factor": [Interval(Integral, 1, None, closed="neither")], + "n_clusters": [None, ClusterMixin, Interval(Integral, 1, None, closed="left")], + "compute_labels": ["boolean"], + "copy": ["boolean"], + } + + def __init__( + self, + *, + threshold=0.5, + branching_factor=50, + n_clusters=3, + compute_labels=True, + copy=True, + ): + self.threshold = threshold + self.branching_factor = branching_factor + self.n_clusters = n_clusters + self.compute_labels = compute_labels + self.copy = copy + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """ + Build a CF Tree for the input data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self + Fitted estimator. + """ + return self._fit(X, partial=False) + + def _fit(self, X, partial): + has_root = getattr(self, "root_", None) + first_call = not (partial and has_root) + + X = self._validate_data( + X, + accept_sparse="csr", + copy=self.copy, + reset=first_call, + dtype=[np.float64, np.float32], + ) + threshold = self.threshold + branching_factor = self.branching_factor + + n_samples, n_features = X.shape + + # If partial_fit is called for the first time or fit is called, we + # start a new tree. + if first_call: + # The first root is the leaf. Manipulate this object throughout. + self.root_ = _CFNode( + threshold=threshold, + branching_factor=branching_factor, + is_leaf=True, + n_features=n_features, + dtype=X.dtype, + ) + + # To enable getting back subclusters. + self.dummy_leaf_ = _CFNode( + threshold=threshold, + branching_factor=branching_factor, + is_leaf=True, + n_features=n_features, + dtype=X.dtype, + ) + self.dummy_leaf_.next_leaf_ = self.root_ + self.root_.prev_leaf_ = self.dummy_leaf_ + + # Cannot vectorize. Enough to convince to use cython. + if not sparse.issparse(X): + iter_func = iter + else: + iter_func = _iterate_sparse_X + + for sample in iter_func(X): + subcluster = _CFSubcluster(linear_sum=sample) + split = self.root_.insert_cf_subcluster(subcluster) + + if split: + new_subcluster1, new_subcluster2 = _split_node( + self.root_, threshold, branching_factor + ) + del self.root_ + self.root_ = _CFNode( + threshold=threshold, + branching_factor=branching_factor, + is_leaf=False, + n_features=n_features, + dtype=X.dtype, + ) + self.root_.append_subcluster(new_subcluster1) + self.root_.append_subcluster(new_subcluster2) + + centroids = np.concatenate([leaf.centroids_ for leaf in self._get_leaves()]) + self.subcluster_centers_ = centroids + self._n_features_out = self.subcluster_centers_.shape[0] + + self._global_clustering(X) + return self + + def _get_leaves(self): + """ + Retrieve the leaves of the CF Node. + + Returns + ------- + leaves : list of shape (n_leaves,) + List of the leaf nodes. 
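+            The leaves are collected by following the ``next_leaf_`` chain
+            starting from ``dummy_leaf_``.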
+ """ + leaf_ptr = self.dummy_leaf_.next_leaf_ + leaves = [] + while leaf_ptr is not None: + leaves.append(leaf_ptr) + leaf_ptr = leaf_ptr.next_leaf_ + return leaves + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X=None, y=None): + """ + Online learning. Prevents rebuilding of CFTree from scratch. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), \ + default=None + Input data. If X is not provided, only the global clustering + step is done. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self + Fitted estimator. + """ + if X is None: + # Perform just the final global clustering step. + self._global_clustering() + return self + else: + return self._fit(X, partial=True) + + def _check_fit(self, X): + check_is_fitted(self) + + if ( + hasattr(self, "subcluster_centers_") + and X.shape[1] != self.subcluster_centers_.shape[1] + ): + raise ValueError( + "Training data and predicted data do not have same number of features." + ) + + def predict(self, X): + """ + Predict data using the ``centroids_`` of subclusters. + + Avoid computation of the row norms of X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data. + + Returns + ------- + labels : ndarray of shape(n_samples,) + Labelled data. + """ + check_is_fitted(self) + X = self._validate_data(X, accept_sparse="csr", reset=False) + return self._predict(X) + + def _predict(self, X): + """Predict data using the ``centroids_`` of subclusters.""" + kwargs = {"Y_norm_squared": self._subcluster_norms} + + with config_context(assume_finite=True): + argmin = pairwise_distances_argmin( + X, self.subcluster_centers_, metric_kwargs=kwargs + ) + return self.subcluster_labels_[argmin] + + def transform(self, X): + """ + Transform X into subcluster centroids dimension. + + Each dimension represents the distance from the sample point to each + cluster centroid. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data. + + Returns + ------- + X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters) + Transformed data. + """ + check_is_fitted(self) + X = self._validate_data(X, accept_sparse="csr", reset=False) + with config_context(assume_finite=True): + return euclidean_distances(X, self.subcluster_centers_) + + def _global_clustering(self, X=None): + """ + Global clustering for the subclusters obtained after fitting + """ + clusterer = self.n_clusters + centroids = self.subcluster_centers_ + compute_labels = (X is not None) and self.compute_labels + + # Preprocessing for the global clustering. + not_enough_centroids = False + if isinstance(clusterer, Integral): + clusterer = AgglomerativeClustering(n_clusters=self.n_clusters) + # There is no need to perform the global clustering step. + if len(centroids) < self.n_clusters: + not_enough_centroids = True + + # To use in predict to avoid recalculation. + self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True) + + if clusterer is None or not_enough_centroids: + self.subcluster_labels_ = np.arange(len(centroids)) + if not_enough_centroids: + warnings.warn( + "Number of subclusters found (%d) by BIRCH is less " + "than (%d). Decrease the threshold." + % (len(centroids), self.n_clusters), + ConvergenceWarning, + ) + else: + # The global clustering step that clusters the subclusters of + # the leaves. 
It assumes the centroids of the subclusters as + # samples and finds the final centroids. + self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_) + + if compute_labels: + self.labels_ = self._predict(X) + + def _more_tags(self): + return {"preserves_dtype": [np.float64, np.float32]} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_bisect_k_means.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_bisect_k_means.py new file mode 100644 index 0000000000000000000000000000000000000000..a1f7716ced822cd9c9494d545daa67983c5e10d5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_bisect_k_means.py @@ -0,0 +1,529 @@ +"""Bisecting K-means clustering.""" +# Author: Michal Krawczyk + +import warnings + +import numpy as np +import scipy.sparse as sp + +from ..base import _fit_context +from ..utils._openmp_helpers import _openmp_effective_n_threads +from ..utils._param_validation import Integral, Interval, StrOptions +from ..utils.extmath import row_norms +from ..utils.validation import _check_sample_weight, check_is_fitted, check_random_state +from ._k_means_common import _inertia_dense, _inertia_sparse +from ._kmeans import ( + _BaseKMeans, + _kmeans_single_elkan, + _kmeans_single_lloyd, + _labels_inertia_threadpool_limit, +) + + +class _BisectingTree: + """Tree structure representing the hierarchical clusters of BisectingKMeans.""" + + def __init__(self, center, indices, score): + """Create a new cluster node in the tree. + + The node holds the center of this cluster and the indices of the data points + that belong to it. + """ + self.center = center + self.indices = indices + self.score = score + + self.left = None + self.right = None + + def split(self, labels, centers, scores): + """Split the cluster node into two subclusters.""" + self.left = _BisectingTree( + indices=self.indices[labels == 0], center=centers[0], score=scores[0] + ) + self.right = _BisectingTree( + indices=self.indices[labels == 1], center=centers[1], score=scores[1] + ) + + # reset the indices attribute to save memory + self.indices = None + + def get_cluster_to_bisect(self): + """Return the cluster node to bisect next. + + It's based on the score of the cluster, which can be either the number of + data points assigned to that cluster or the inertia of that cluster + (see `bisecting_strategy` for details). + """ + max_score = None + + for cluster_leaf in self.iter_leaves(): + if max_score is None or cluster_leaf.score > max_score: + max_score = cluster_leaf.score + best_cluster_leaf = cluster_leaf + + return best_cluster_leaf + + def iter_leaves(self): + """Iterate over all the cluster leaves in the tree.""" + if self.left is None: + yield self + else: + yield from self.left.iter_leaves() + yield from self.right.iter_leaves() + + +class BisectingKMeans(_BaseKMeans): + """Bisecting K-Means clustering. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.1 + + Parameters + ---------- + n_clusters : int, default=8 + The number of clusters to form as well as the number of + centroids to generate. + + init : {'k-means++', 'random'} or callable, default='random' + Method for initialization: + + 'k-means++' : selects initial cluster centers for k-mean + clustering in a smart way to speed up convergence. See section + Notes in k_init for more details. + + 'random': choose `n_clusters` observations (rows) at random from data + for the initial centroids. 
+ + If a callable is passed, it should take arguments X, n_clusters and a + random state and return an initialization. + + n_init : int, default=1 + Number of time the inner k-means algorithm will be run with different + centroid seeds in each bisection. + That will result producing for each bisection best output of n_init + consecutive runs in terms of inertia. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for centroid initialization + in inner K-Means. Use an int to make the randomness deterministic. + See :term:`Glossary `. + + max_iter : int, default=300 + Maximum number of iterations of the inner k-means algorithm at each + bisection. + + verbose : int, default=0 + Verbosity mode. + + tol : float, default=1e-4 + Relative tolerance with regards to Frobenius norm of the difference + in the cluster centers of two consecutive iterations to declare + convergence. Used in inner k-means algorithm at each bisection to pick + best possible clusters. + + copy_x : bool, default=True + When pre-computing distances it is more numerically accurate to center + the data first. If copy_x is True (default), then the original data is + not modified. If False, the original data is modified, and put back + before the function returns, but small numerical differences may be + introduced by subtracting and then adding the data mean. Note that if + the original data is not C-contiguous, a copy will be made even if + copy_x is False. If the original data is sparse, but not in CSR format, + a copy will be made even if copy_x is False. + + algorithm : {"lloyd", "elkan"}, default="lloyd" + Inner K-means algorithm used in bisection. + The classical EM-style algorithm is `"lloyd"`. + The `"elkan"` variation can be more efficient on some datasets with + well-defined clusters, by using the triangle inequality. However it's + more memory intensive due to the allocation of an extra array of shape + `(n_samples, n_clusters)`. + + bisecting_strategy : {"biggest_inertia", "largest_cluster"},\ + default="biggest_inertia" + Defines how bisection should be performed: + + - "biggest_inertia" means that BisectingKMeans will always check + all calculated cluster for cluster with biggest SSE + (Sum of squared errors) and bisect it. This approach concentrates on + precision, but may be costly in terms of execution time (especially for + larger amount of data points). + + - "largest_cluster" - BisectingKMeans will always split cluster with + largest amount of points assigned to it from all clusters + previously calculated. That should work faster than picking by SSE + ('biggest_inertia') and may produce similar results in most cases. + + Attributes + ---------- + cluster_centers_ : ndarray of shape (n_clusters, n_features) + Coordinates of cluster centers. If the algorithm stops before fully + converging (see ``tol`` and ``max_iter``), these will not be + consistent with ``labels_``. + + labels_ : ndarray of shape (n_samples,) + Labels of each point. + + inertia_ : float + Sum of squared distances of samples to their closest cluster center, + weighted by the sample weights if provided. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + See Also + -------- + KMeans : Original implementation of K-Means algorithm. 
+ + Notes + ----- + It might be inefficient when n_cluster is less than 3, due to unnecessary + calculations for that case. + + Examples + -------- + >>> from sklearn.cluster import BisectingKMeans + >>> import numpy as np + >>> X = np.array([[1, 1], [10, 1], [3, 1], + ... [10, 0], [2, 1], [10, 2], + ... [10, 8], [10, 9], [10, 10]]) + >>> bisect_means = BisectingKMeans(n_clusters=3, random_state=0).fit(X) + >>> bisect_means.labels_ + array([0, 2, 0, 2, 0, 2, 1, 1, 1], dtype=int32) + >>> bisect_means.predict([[0, 0], [12, 3]]) + array([0, 2], dtype=int32) + >>> bisect_means.cluster_centers_ + array([[ 2., 1.], + [10., 9.], + [10., 1.]]) + """ + + _parameter_constraints: dict = { + **_BaseKMeans._parameter_constraints, + "init": [StrOptions({"k-means++", "random"}), callable], + "n_init": [Interval(Integral, 1, None, closed="left")], + "copy_x": ["boolean"], + "algorithm": [StrOptions({"lloyd", "elkan"})], + "bisecting_strategy": [StrOptions({"biggest_inertia", "largest_cluster"})], + } + + def __init__( + self, + n_clusters=8, + *, + init="random", + n_init=1, + random_state=None, + max_iter=300, + verbose=0, + tol=1e-4, + copy_x=True, + algorithm="lloyd", + bisecting_strategy="biggest_inertia", + ): + super().__init__( + n_clusters=n_clusters, + init=init, + max_iter=max_iter, + verbose=verbose, + random_state=random_state, + tol=tol, + n_init=n_init, + ) + + self.copy_x = copy_x + self.algorithm = algorithm + self.bisecting_strategy = bisecting_strategy + + def _warn_mkl_vcomp(self, n_active_threads): + """Warn when vcomp and mkl are both present""" + warnings.warn( + "BisectingKMeans is known to have a memory leak on Windows " + "with MKL, when there are less chunks than available " + "threads. You can avoid it by setting the environment" + f" variable OMP_NUM_THREADS={n_active_threads}." + ) + + def _inertia_per_cluster(self, X, centers, labels, sample_weight): + """Calculate the sum of squared errors (inertia) per cluster. + + Parameters + ---------- + X : {ndarray, csr_matrix} of shape (n_samples, n_features) + The input samples. + + centers : ndarray of shape (n_clusters=2, n_features) + The cluster centers. + + labels : ndarray of shape (n_samples,) + Index of the cluster each sample belongs to. + + sample_weight : ndarray of shape (n_samples,) + The weights for each observation in X. + + Returns + ------- + inertia_per_cluster : ndarray of shape (n_clusters=2,) + Sum of squared errors (inertia) for each cluster. + """ + n_clusters = centers.shape[0] # = 2 since centers comes from a bisection + _inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense + + inertia_per_cluster = np.empty(n_clusters) + for label in range(n_clusters): + inertia_per_cluster[label] = _inertia( + X, sample_weight, centers, labels, self._n_threads, single_label=label + ) + + return inertia_per_cluster + + def _bisect(self, X, x_squared_norms, sample_weight, cluster_to_bisect): + """Split a cluster into 2 subsclusters. + + Parameters + ---------- + X : {ndarray, csr_matrix} of shape (n_samples, n_features) + Training instances to cluster. + + x_squared_norms : ndarray of shape (n_samples,) + Squared euclidean norm of each data point. + + sample_weight : ndarray of shape (n_samples,) + The weights for each observation in X. + + cluster_to_bisect : _BisectingTree node object + The cluster node to split. 
+ """ + X = X[cluster_to_bisect.indices] + x_squared_norms = x_squared_norms[cluster_to_bisect.indices] + sample_weight = sample_weight[cluster_to_bisect.indices] + + best_inertia = None + + # Split samples in X into 2 clusters. + # Repeating `n_init` times to obtain best clusters + for _ in range(self.n_init): + centers_init = self._init_centroids( + X, + x_squared_norms=x_squared_norms, + init=self.init, + random_state=self._random_state, + n_centroids=2, + sample_weight=sample_weight, + ) + + labels, inertia, centers, _ = self._kmeans_single( + X, + sample_weight, + centers_init, + max_iter=self.max_iter, + verbose=self.verbose, + tol=self.tol, + n_threads=self._n_threads, + ) + + # allow small tolerance on the inertia to accommodate for + # non-deterministic rounding errors due to parallel computation + if best_inertia is None or inertia < best_inertia * (1 - 1e-6): + best_labels = labels + best_centers = centers + best_inertia = inertia + + if self.verbose: + print(f"New centroids from bisection: {best_centers}") + + if self.bisecting_strategy == "biggest_inertia": + scores = self._inertia_per_cluster( + X, best_centers, best_labels, sample_weight + ) + else: # bisecting_strategy == "largest_cluster" + # Using minlength to make sure that we have the counts for both labels even + # if all samples are labelled 0. + scores = np.bincount(best_labels, minlength=2) + + cluster_to_bisect.split(best_labels, best_centers, scores) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None, sample_weight=None): + """Compute bisecting k-means clustering. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + + Training instances to cluster. + + .. note:: The data will be converted to C ordering, + which will cause a memory copy + if the given data is not C-contiguous. + + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in X. If None, all observations + are assigned equal weight. `sample_weight` is not used during + initialization if `init` is a callable. + + Returns + ------- + self + Fitted estimator. 
+ """ + X = self._validate_data( + X, + accept_sparse="csr", + dtype=[np.float64, np.float32], + order="C", + copy=self.copy_x, + accept_large_sparse=False, + ) + + self._check_params_vs_input(X) + + self._random_state = check_random_state(self.random_state) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + self._n_threads = _openmp_effective_n_threads() + + if self.algorithm == "lloyd" or self.n_clusters == 1: + self._kmeans_single = _kmeans_single_lloyd + self._check_mkl_vcomp(X, X.shape[0]) + else: + self._kmeans_single = _kmeans_single_elkan + + # Subtract of mean of X for more accurate distance computations + if not sp.issparse(X): + self._X_mean = X.mean(axis=0) + X -= self._X_mean + + # Initialize the hierarchical clusters tree + self._bisecting_tree = _BisectingTree( + indices=np.arange(X.shape[0]), + center=X.mean(axis=0), + score=0, + ) + + x_squared_norms = row_norms(X, squared=True) + + for _ in range(self.n_clusters - 1): + # Chose cluster to bisect + cluster_to_bisect = self._bisecting_tree.get_cluster_to_bisect() + + # Split this cluster into 2 subclusters + self._bisect(X, x_squared_norms, sample_weight, cluster_to_bisect) + + # Aggregate final labels and centers from the bisecting tree + self.labels_ = np.full(X.shape[0], -1, dtype=np.int32) + self.cluster_centers_ = np.empty((self.n_clusters, X.shape[1]), dtype=X.dtype) + + for i, cluster_node in enumerate(self._bisecting_tree.iter_leaves()): + self.labels_[cluster_node.indices] = i + self.cluster_centers_[i] = cluster_node.center + cluster_node.label = i # label final clusters for future prediction + cluster_node.indices = None # release memory + + # Restore original data + if not sp.issparse(X): + X += self._X_mean + self.cluster_centers_ += self._X_mean + + _inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense + self.inertia_ = _inertia( + X, sample_weight, self.cluster_centers_, self.labels_, self._n_threads + ) + + self._n_features_out = self.cluster_centers_.shape[0] + + return self + + def predict(self, X): + """Predict which cluster each sample in X belongs to. + + Prediction is made by going down the hierarchical tree + in searching of closest leaf cluster. + + In the vector quantization literature, `cluster_centers_` is called + the code book and each value returned by `predict` is the index of + the closest code in the code book. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data to predict. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Index of the cluster each sample belongs to. + """ + check_is_fitted(self) + + X = self._check_test_data(X) + x_squared_norms = row_norms(X, squared=True) + + # sample weights are unused but necessary in cython helpers + sample_weight = np.ones_like(x_squared_norms) + + labels = self._predict_recursive(X, sample_weight, self._bisecting_tree) + + return labels + + def _predict_recursive(self, X, sample_weight, cluster_node): + """Predict recursively by going down the hierarchical tree. + + Parameters + ---------- + X : {ndarray, csr_matrix} of shape (n_samples, n_features) + The data points, currently assigned to `cluster_node`, to predict between + the subclusters of this node. + + sample_weight : ndarray of shape (n_samples,) + The weights for each observation in X. + + cluster_node : _BisectingTree node object + The cluster node of the hierarchical tree. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Index of the cluster each sample belongs to. 
+ """ + if cluster_node.left is None: + # This cluster has no subcluster. Labels are just the label of the cluster. + return np.full(X.shape[0], cluster_node.label, dtype=np.int32) + + # Determine if data points belong to the left or right subcluster + centers = np.vstack((cluster_node.left.center, cluster_node.right.center)) + if hasattr(self, "_X_mean"): + centers += self._X_mean + + cluster_labels = _labels_inertia_threadpool_limit( + X, + sample_weight, + centers, + self._n_threads, + return_inertia=False, + ) + mask = cluster_labels == 0 + + # Compute the labels for each subset of the data points. + labels = np.full(X.shape[0], -1, dtype=np.int32) + + labels[mask] = self._predict_recursive( + X[mask], sample_weight[mask], cluster_node.left + ) + + labels[~mask] = self._predict_recursive( + X[~mask], sample_weight[~mask], cluster_node.right + ) + + return labels + + def _more_tags(self): + return {"preserves_dtype": [np.float64, np.float32]} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_dbscan.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_dbscan.py new file mode 100644 index 0000000000000000000000000000000000000000..fbcbd73dfbb3bbf4e27243a5d266cb0fd19bd276 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_dbscan.py @@ -0,0 +1,476 @@ +""" +DBSCAN: Density-Based Spatial Clustering of Applications with Noise +""" + +# Author: Robert Layton +# Joel Nothman +# Lars Buitinck +# +# License: BSD 3 clause + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import sparse + +from ..base import BaseEstimator, ClusterMixin, _fit_context +from ..metrics.pairwise import _VALID_METRICS +from ..neighbors import NearestNeighbors +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.validation import _check_sample_weight +from ._dbscan_inner import dbscan_inner + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "sample_weight": ["array-like", None], + }, + prefer_skip_nested_validation=False, +) +def dbscan( + X, + eps=0.5, + *, + min_samples=5, + metric="minkowski", + metric_params=None, + algorithm="auto", + leaf_size=30, + p=2, + sample_weight=None, + n_jobs=None, +): + """Perform DBSCAN clustering from vector array or distance matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse (CSR) matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) + A feature array, or array of distances between samples if + ``metric='precomputed'``. + + eps : float, default=0.5 + The maximum distance between two samples for one to be considered + as in the neighborhood of the other. This is not a maximum bound + on the distances of points within a cluster. This is the most + important DBSCAN parameter to choose appropriately for your data set + and distance function. + + min_samples : int, default=5 + The number of samples (or total weight) in a neighborhood for a point + to be considered as a core point. This includes the point itself. + + metric : str or callable, default='minkowski' + The metric to use when calculating distance between instances in a + feature array. If metric is a string or callable, it must be one of + the options allowed by :func:`sklearn.metrics.pairwise_distances` for + its metric parameter. + If metric is "precomputed", X is assumed to be a distance matrix and + must be square during fit. 
+ X may be a :term:`sparse graph `, + in which case only "nonzero" elements may be considered neighbors. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + .. versionadded:: 0.19 + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + The algorithm to be used by the NearestNeighbors module + to compute pointwise distances and find nearest neighbors. + See NearestNeighbors module documentation for details. + + leaf_size : int, default=30 + Leaf size passed to BallTree or cKDTree. This can affect the speed + of the construction and query, as well as the memory required + to store the tree. The optimal value depends + on the nature of the problem. + + p : float, default=2 + The power of the Minkowski metric to be used to calculate distance + between points. + + sample_weight : array-like of shape (n_samples,), default=None + Weight of each sample, such that a sample with a weight of at least + ``min_samples`` is by itself a core sample; a sample with negative + weight may inhibit its eps-neighbor from being core. + Note that weights are absolute, and default to 1. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. ``None`` means + 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means + using all processors. See :term:`Glossary ` for more details. + If precomputed distance are used, parallel execution is not available + and thus n_jobs will have no effect. + + Returns + ------- + core_samples : ndarray of shape (n_core_samples,) + Indices of core samples. + + labels : ndarray of shape (n_samples,) + Cluster labels for each point. Noisy samples are given the label -1. + + See Also + -------- + DBSCAN : An estimator interface for this clustering algorithm. + OPTICS : A similar estimator interface clustering at multiple values of + eps. Our implementation is optimized for memory usage. + + Notes + ----- + For an example, see :ref:`examples/cluster/plot_dbscan.py + `. + + This implementation bulk-computes all neighborhood queries, which increases + the memory complexity to O(n.d) where d is the average number of neighbors, + while original DBSCAN had memory complexity O(n). It may attract a higher + memory complexity when querying these nearest neighborhoods, depending + on the ``algorithm``. + + One way to avoid the query complexity is to pre-compute sparse + neighborhoods in chunks using + :func:`NearestNeighbors.radius_neighbors_graph + ` with + ``mode='distance'``, then using ``metric='precomputed'`` here. + + Another way to reduce memory and computation time is to remove + (near-)duplicate points and use ``sample_weight`` instead. + + :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower + memory usage. + + References + ---------- + Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based + Algorithm for Discovering Clusters in Large Spatial Databases with Noise" + `_. + In: Proceedings of the 2nd International Conference on Knowledge Discovery + and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 + + Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017). + :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN." + <10.1145/3068335>` + ACM Transactions on Database Systems (TODS), 42(3), 19. 
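The memory-saving workflow suggested in the Notes above (precomputing a sparse radius-neighborhood graph and passing it with ``metric='precomputed'``) looks roughly like the following editor's sketch; the data and radius are illustrative only:

```python
# Editor's sketch (illustrative data and radius): the sparse precomputed
# neighborhood workflow suggested in the Notes above.
import numpy as np
from sklearn.cluster import dbscan
from sklearn.neighbors import NearestNeighbors

rng = np.random.RandomState(0)
X = rng.rand(1000, 3)

# Precompute the radius-neighborhood graph with distances as edge weights.
nn = NearestNeighbors(radius=0.2).fit(X)
D = nn.radius_neighbors_graph(X, mode="distance")

# Zero/missing entries in the sparse graph are treated as non-neighbors,
# so the same radius is used for eps here.
core_samples, labels = dbscan(D, eps=0.2, min_samples=5, metric="precomputed")
print(len(core_samples), np.unique(labels).size)
```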
+ + Examples + -------- + >>> from sklearn.cluster import dbscan + >>> X = [[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]] + >>> core_samples, labels = dbscan(X, eps=3, min_samples=2) + >>> core_samples + array([0, 1, 2, 3, 4]) + >>> labels + array([ 0, 0, 0, 1, 1, -1]) + """ + + est = DBSCAN( + eps=eps, + min_samples=min_samples, + metric=metric, + metric_params=metric_params, + algorithm=algorithm, + leaf_size=leaf_size, + p=p, + n_jobs=n_jobs, + ) + est.fit(X, sample_weight=sample_weight) + return est.core_sample_indices_, est.labels_ + + +class DBSCAN(ClusterMixin, BaseEstimator): + """Perform DBSCAN clustering from vector array or distance matrix. + + DBSCAN - Density-Based Spatial Clustering of Applications with Noise. + Finds core samples of high density and expands clusters from them. + Good for data which contains clusters of similar density. + + The worst case memory complexity of DBSCAN is :math:`O({n}^2)`, which can + occur when the `eps` param is large and `min_samples` is low. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + eps : float, default=0.5 + The maximum distance between two samples for one to be considered + as in the neighborhood of the other. This is not a maximum bound + on the distances of points within a cluster. This is the most + important DBSCAN parameter to choose appropriately for your data set + and distance function. + + min_samples : int, default=5 + The number of samples (or total weight) in a neighborhood for a point to + be considered as a core point. This includes the point itself. If + `min_samples` is set to a higher value, DBSCAN will find denser clusters, + whereas if it is set to a lower value, the found clusters will be more + sparse. + + metric : str, or callable, default='euclidean' + The metric to use when calculating distance between instances in a + feature array. If metric is a string or callable, it must be one of + the options allowed by :func:`sklearn.metrics.pairwise_distances` for + its metric parameter. + If metric is "precomputed", X is assumed to be a distance matrix and + must be square. X may be a :term:`sparse graph`, in which + case only "nonzero" elements may be considered neighbors for DBSCAN. + + .. versionadded:: 0.17 + metric *precomputed* to accept precomputed sparse matrix. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + .. versionadded:: 0.19 + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + The algorithm to be used by the NearestNeighbors module + to compute pointwise distances and find nearest neighbors. + See NearestNeighbors module documentation for details. + + leaf_size : int, default=30 + Leaf size passed to BallTree or cKDTree. This can affect the speed + of the construction and query, as well as the memory required + to store the tree. The optimal value depends + on the nature of the problem. + + p : float, default=None + The power of the Minkowski metric to be used to calculate distance + between points. If None, then ``p=2`` (equivalent to the Euclidean + distance). + + n_jobs : int, default=None + The number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + core_sample_indices_ : ndarray of shape (n_core_samples,) + Indices of core samples. + + components_ : ndarray of shape (n_core_samples, n_features) + Copy of each core sample found by training. 
+ + labels_ : ndarray of shape (n_samples) + Cluster labels for each point in the dataset given to fit(). + Noisy samples are given the label -1. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + OPTICS : A similar clustering at multiple values of eps. Our implementation + is optimized for memory usage. + + Notes + ----- + For an example, see :ref:`examples/cluster/plot_dbscan.py + `. + + This implementation bulk-computes all neighborhood queries, which increases + the memory complexity to O(n.d) where d is the average number of neighbors, + while original DBSCAN had memory complexity O(n). It may attract a higher + memory complexity when querying these nearest neighborhoods, depending + on the ``algorithm``. + + One way to avoid the query complexity is to pre-compute sparse + neighborhoods in chunks using + :func:`NearestNeighbors.radius_neighbors_graph + ` with + ``mode='distance'``, then using ``metric='precomputed'`` here. + + Another way to reduce memory and computation time is to remove + (near-)duplicate points and use ``sample_weight`` instead. + + :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower memory + usage. + + References + ---------- + Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based + Algorithm for Discovering Clusters in Large Spatial Databases with Noise" + `_. + In: Proceedings of the 2nd International Conference on Knowledge Discovery + and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 + + Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017). + :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN." + <10.1145/3068335>` + ACM Transactions on Database Systems (TODS), 42(3), 19. + + Examples + -------- + >>> from sklearn.cluster import DBSCAN + >>> import numpy as np + >>> X = np.array([[1, 2], [2, 2], [2, 3], + ... [8, 7], [8, 8], [25, 80]]) + >>> clustering = DBSCAN(eps=3, min_samples=2).fit(X) + >>> clustering.labels_ + array([ 0, 0, 0, 1, 1, -1]) + >>> clustering + DBSCAN(eps=3, min_samples=2) + """ + + _parameter_constraints: dict = { + "eps": [Interval(Real, 0.0, None, closed="neither")], + "min_samples": [Interval(Integral, 1, None, closed="left")], + "metric": [ + StrOptions(set(_VALID_METRICS) | {"precomputed"}), + callable, + ], + "metric_params": [dict, None], + "algorithm": [StrOptions({"auto", "ball_tree", "kd_tree", "brute"})], + "leaf_size": [Interval(Integral, 1, None, closed="left")], + "p": [Interval(Real, 0.0, None, closed="left"), None], + "n_jobs": [Integral, None], + } + + def __init__( + self, + eps=0.5, + *, + min_samples=5, + metric="euclidean", + metric_params=None, + algorithm="auto", + leaf_size=30, + p=None, + n_jobs=None, + ): + self.eps = eps + self.min_samples = min_samples + self.metric = metric + self.metric_params = metric_params + self.algorithm = algorithm + self.leaf_size = leaf_size + self.p = p + self.n_jobs = n_jobs + + @_fit_context( + # DBSCAN.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None, sample_weight=None): + """Perform DBSCAN clustering from features, or distance matrix. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), or \ + (n_samples, n_samples) + Training instances to cluster, or distances between instances if + ``metric='precomputed'``. If a sparse matrix is provided, it will + be converted into a sparse ``csr_matrix``. + + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + Weight of each sample, such that a sample with a weight of at least + ``min_samples`` is by itself a core sample; a sample with a + negative weight may inhibit its eps-neighbor from being core. + Note that weights are absolute, and default to 1. + + Returns + ------- + self : object + Returns a fitted instance of self. + """ + X = self._validate_data(X, accept_sparse="csr") + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + # Calculate neighborhood for all samples. This leaves the original + # point in, which needs to be considered later (i.e. point i is in the + # neighborhood of point i. While True, its useless information) + if self.metric == "precomputed" and sparse.issparse(X): + # set the diagonal to explicit values, as a point is its own + # neighbor + X = X.copy() # copy to avoid in-place modification + with warnings.catch_warnings(): + warnings.simplefilter("ignore", sparse.SparseEfficiencyWarning) + X.setdiag(X.diagonal()) + + neighbors_model = NearestNeighbors( + radius=self.eps, + algorithm=self.algorithm, + leaf_size=self.leaf_size, + metric=self.metric, + metric_params=self.metric_params, + p=self.p, + n_jobs=self.n_jobs, + ) + neighbors_model.fit(X) + # This has worst case O(n^2) memory complexity + neighborhoods = neighbors_model.radius_neighbors(X, return_distance=False) + + if sample_weight is None: + n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods]) + else: + n_neighbors = np.array( + [np.sum(sample_weight[neighbors]) for neighbors in neighborhoods] + ) + + # Initially, all samples are noise. + labels = np.full(X.shape[0], -1, dtype=np.intp) + + # A list of all core samples found. + core_samples = np.asarray(n_neighbors >= self.min_samples, dtype=np.uint8) + dbscan_inner(core_samples, neighborhoods, labels) + + self.core_sample_indices_ = np.where(core_samples)[0] + self.labels_ = labels + + if len(self.core_sample_indices_): + # fix for scipy sparse indexing issue + self.components_ = X[self.core_sample_indices_].copy() + else: + # no core samples + self.components_ = np.empty((0, X.shape[1])) + return self + + def fit_predict(self, X, y=None, sample_weight=None): + """Compute clusters from a data or distance matrix and predict labels. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), or \ + (n_samples, n_samples) + Training instances to cluster, or distances between instances if + ``metric='precomputed'``. If a sparse matrix is provided, it will + be converted into a sparse ``csr_matrix``. + + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + Weight of each sample, such that a sample with a weight of at least + ``min_samples`` is by itself a core sample; a sample with a + negative weight may inhibit its eps-neighbor from being core. + Note that weights are absolute, and default to 1. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Cluster labels. Noisy samples are given the label -1. 
+ """ + self.fit(X, sample_weight=sample_weight) + return self.labels_ + + def _more_tags(self): + return {"pairwise": self.metric == "precomputed"} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_dbscan_inner.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_dbscan_inner.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..667db1311faeba2e5913b686209611926526988f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_dbscan_inner.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_feature_agglomeration.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_feature_agglomeration.py new file mode 100644 index 0000000000000000000000000000000000000000..f84f18c1c18b3b71f4efcab8445c93f56a609318 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_feature_agglomeration.py @@ -0,0 +1,104 @@ +""" +Feature agglomeration. Base classes and functions for performing feature +agglomeration. +""" +# Author: V. Michel, A. Gramfort +# License: BSD 3 clause + +import warnings + +import numpy as np +from scipy.sparse import issparse + +from ..base import TransformerMixin +from ..utils import metadata_routing +from ..utils.validation import check_is_fitted + +############################################################################### +# Mixin class for feature agglomeration. + + +class AgglomerationTransform(TransformerMixin): + """ + A class for feature agglomeration via the transform interface. + """ + + # This prevents ``set_split_inverse_transform`` to be generated for the + # non-standard ``Xred`` arg on ``inverse_transform``. + # TODO(1.5): remove when Xred is removed for inverse_transform. + __metadata_request__inverse_transform = {"Xred": metadata_routing.UNUSED} + + def transform(self, X): + """ + Transform a new matrix using the built clustering. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or \ + (n_samples, n_samples) + A M by N array of M observations in N dimensions or a length + M array of M one-dimensional observations. + + Returns + ------- + Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,) + The pooled values for each feature cluster. + """ + check_is_fitted(self) + + X = self._validate_data(X, reset=False) + if self.pooling_func == np.mean and not issparse(X): + size = np.bincount(self.labels_) + n_samples = X.shape[0] + # a fast way to compute the mean of grouped features + nX = np.array( + [np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)] + ) + else: + nX = [ + self.pooling_func(X[:, self.labels_ == l], axis=1) + for l in np.unique(self.labels_) + ] + nX = np.array(nX).T + return nX + + def inverse_transform(self, Xt=None, Xred=None): + """ + Inverse the transformation and return a vector of size `n_features`. + + Parameters + ---------- + Xt : array-like of shape (n_samples, n_clusters) or (n_clusters,) + The values to be assigned to each cluster of samples. + + Xred : deprecated + Use `Xt` instead. + + .. deprecated:: 1.3 + + Returns + ------- + X : ndarray of shape (n_samples, n_features) or (n_features,) + A vector of size `n_samples` with the values of `Xred` assigned to + each of the cluster of samples. 
+ """ + if Xt is None and Xred is None: + raise TypeError("Missing required positional argument: Xt") + + if Xred is not None and Xt is not None: + raise ValueError("Please provide only `Xt`, and not `Xred`.") + + if Xred is not None: + warnings.warn( + ( + "Input argument `Xred` was renamed to `Xt` in v1.3 and will be" + " removed in v1.5." + ), + FutureWarning, + ) + Xt = Xred + + check_is_fitted(self) + + unil, inverse = np.unique(self.labels_, return_inverse=True) + return Xt[..., inverse] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5edaebafb4af813dda4f8d2be15474104a7754e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/hdbscan.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/hdbscan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81c13a11e35c01513a0ac8f541246014cbeb0f82 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/hdbscan.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_linkage.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_linkage.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..279891c90c77167e59a3a34a55e5d097ff39865f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_linkage.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_reachability.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_reachability.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0ae697dfa151ee889311e2d8b0bac0d4060227b0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_reachability.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e7c4aae7d1b407bb43d4f6a805b6ac14dd1f19b3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.pxd new file mode 100644 index 0000000000000000000000000000000000000000..23708b9a38d07884c035b88e260821146075f861 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.pxd @@ -0,0 
+1,49 @@ +# Copyright (c) 2015, Leland McInnes +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: + +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. + +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. + +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from ...utils._typedefs cimport intp_t, float64_t, uint8_t +cimport numpy as cnp + +# This corresponds to the scipy.cluster.hierarchy format +ctypedef packed struct HIERARCHY_t: + intp_t left_node + intp_t right_node + float64_t value + intp_t cluster_size + +# Effectively an edgelist encoding a parent/child pair, along with a value and +# the corresponding cluster_size in each row providing a tree structure. +ctypedef packed struct CONDENSED_t: + intp_t parent + intp_t child + float64_t value + intp_t cluster_size + +cdef extern from "numpy/arrayobject.h": + intp_t * PyArray_SHAPE(cnp.PyArrayObject *) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/hdbscan.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/hdbscan.py new file mode 100644 index 0000000000000000000000000000000000000000..fc51f10cffba01a4fbe8f40398d2e1b9704cf571 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/hdbscan.py @@ -0,0 +1,1018 @@ +""" +HDBSCAN: Hierarchical Density-Based Spatial Clustering + of Applications with Noise +""" +# Authors: Leland McInnes +# Steve Astels +# John Healy +# Meekail Zain +# Copyright (c) 2015, Leland McInnes +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: + +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. + +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. + +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. 
+ +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from numbers import Integral, Real +from warnings import warn + +import numpy as np +from scipy.sparse import csgraph, issparse + +from ...base import BaseEstimator, ClusterMixin, _fit_context +from ...metrics import pairwise_distances +from ...metrics._dist_metrics import DistanceMetric +from ...neighbors import BallTree, KDTree, NearestNeighbors +from ...utils._param_validation import Interval, StrOptions +from ...utils.validation import _allclose_dense_sparse, _assert_all_finite +from ._linkage import ( + MST_edge_dtype, + make_single_linkage, + mst_from_data_matrix, + mst_from_mutual_reachability, +) +from ._reachability import mutual_reachability_graph +from ._tree import HIERARCHY_dtype, labelling_at_cut, tree_to_labels + +FAST_METRICS = set(KDTree.valid_metrics + BallTree.valid_metrics) + +# Encodings are arbitrary but must be strictly negative. +# The current encodings are chosen as extensions to the -1 noise label. +# Avoided enums so that the end user only deals with simple labels. +_OUTLIER_ENCODING: dict = { + "infinite": { + "label": -2, + # The probability could also be 1, since infinite points are certainly + # infinite outliers, however 0 is convention from the HDBSCAN library + # implementation. + "prob": 0, + }, + "missing": { + "label": -3, + # A nan probability is chosen to emphasize the fact that the + # corresponding data was not considered in the clustering problem. + "prob": np.nan, + }, +} + + +def _brute_mst(mutual_reachability, min_samples): + """ + Builds a minimum spanning tree (MST) from the provided mutual-reachability + values. This function dispatches to a custom Cython implementation for + dense arrays, and `scipy.sparse.csgraph.minimum_spanning_tree` for sparse + arrays/matrices. + + Parameters + ---------- + mututal_reachability_graph: {ndarray, sparse matrix} of shape \ + (n_samples, n_samples) + Weighted adjacency matrix of the mutual reachability graph. + + min_samples : int, default=None + The number of samples in a neighborhood for a point + to be considered as a core point. This includes the point itself. + + Returns + ------- + mst : ndarray of shape (n_samples - 1,), dtype=MST_edge_dtype + The MST representation of the mutual-reachability graph. The MST is + represented as a collection of edges. + """ + if not issparse(mutual_reachability): + return mst_from_mutual_reachability(mutual_reachability) + + # Check if the mutual reachability matrix has any rows which have + # less than `min_samples` non-zero elements. + indptr = mutual_reachability.indptr + num_points = mutual_reachability.shape[0] + if any((indptr[i + 1] - indptr[i]) < min_samples for i in range(num_points)): + raise ValueError( + f"There exists points with fewer than {min_samples} neighbors. 
Ensure" + " your distance matrix has non-zero values for at least" + f" `min_sample`={min_samples} neighbors for each points (i.e. K-nn" + " graph), or specify a `max_distance` in `metric_params` to use when" + " distances are missing." + ) + # Check connected component on mutual reachability. + # If more than one connected component is present, + # it means that the graph is disconnected. + n_components = csgraph.connected_components( + mutual_reachability, directed=False, return_labels=False + ) + if n_components > 1: + raise ValueError( + f"Sparse mutual reachability matrix has {n_components} connected" + " components. HDBSCAN cannot be perfomed on a disconnected graph. Ensure" + " that the sparse distance matrix has only one connected component." + ) + + # Compute the minimum spanning tree for the sparse graph + sparse_min_spanning_tree = csgraph.minimum_spanning_tree(mutual_reachability) + rows, cols = sparse_min_spanning_tree.nonzero() + mst = np.rec.fromarrays( + [rows, cols, sparse_min_spanning_tree.data], + dtype=MST_edge_dtype, + ) + return mst + + +def _process_mst(min_spanning_tree): + """ + Builds a single-linkage tree (SLT) from the provided minimum spanning tree + (MST). The MST is first sorted then processed by a custom Cython routine. + + Parameters + ---------- + min_spanning_tree : ndarray of shape (n_samples - 1,), dtype=MST_edge_dtype + The MST representation of the mutual-reachability graph. The MST is + represented as a collection of edges. + + Returns + ------- + single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype + The single-linkage tree tree (dendrogram) built from the MST. + """ + # Sort edges of the min_spanning_tree by weight + row_order = np.argsort(min_spanning_tree["distance"]) + min_spanning_tree = min_spanning_tree[row_order] + # Convert edge list into standard hierarchical clustering format + return make_single_linkage(min_spanning_tree) + + +def _hdbscan_brute( + X, + min_samples=5, + alpha=None, + metric="euclidean", + n_jobs=None, + copy=False, + **metric_params, +): + """ + Builds a single-linkage tree (SLT) from the input data `X`. If + `metric="precomputed"` then `X` must be a symmetric array of distances. + Otherwise, the pairwise distances are calculated directly and passed to + `mutual_reachability_graph`. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples) + Either the raw data from which to compute the pairwise distances, + or the precomputed distances. + + min_samples : int, default=None + The number of samples in a neighborhood for a point + to be considered as a core point. This includes the point itself. + + alpha : float, default=1.0 + A distance scaling parameter as used in robust single linkage. + + metric : str or callable, default='euclidean' + The metric to use when calculating distance between instances in a + feature array. + + - If metric is a string or callable, it must be one of + the options allowed by :func:`~sklearn.metrics.pairwise_distances` + for its metric parameter. + + - If metric is "precomputed", X is assumed to be a distance matrix and + must be square. + + n_jobs : int, default=None + The number of jobs to use for computing the pairwise distances. This + works by breaking down the pairwise matrix into n_jobs even slices and + computing them in parallel. This parameter is passed directly to + :func:`~sklearn.metrics.pairwise_distances`. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. 
See :term:`Glossary ` + for more details. + + copy : bool, default=False + If `copy=True` then any time an in-place modifications would be made + that would overwrite `X`, a copy will first be made, guaranteeing that + the original data will be unchanged. Currently, it only applies when + `metric="precomputed"`, when passing a dense array or a CSR sparse + array/matrix. + + metric_params : dict, default=None + Arguments passed to the distance metric. + + Returns + ------- + single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype + The single-linkage tree tree (dendrogram) built from the MST. + """ + if metric == "precomputed": + if X.shape[0] != X.shape[1]: + raise ValueError( + "The precomputed distance matrix is expected to be symmetric, however" + f" it has shape {X.shape}. Please verify that the" + " distance matrix was constructed correctly." + ) + if not _allclose_dense_sparse(X, X.T): + raise ValueError( + "The precomputed distance matrix is expected to be symmetric, however" + " its values appear to be asymmetric. Please verify that the distance" + " matrix was constructed correctly." + ) + + distance_matrix = X.copy() if copy else X + else: + distance_matrix = pairwise_distances( + X, metric=metric, n_jobs=n_jobs, **metric_params + ) + distance_matrix /= alpha + + max_distance = metric_params.get("max_distance", 0.0) + if issparse(distance_matrix) and distance_matrix.format != "csr": + # we need CSR format to avoid a conversion in `_brute_mst` when calling + # `csgraph.connected_components` + distance_matrix = distance_matrix.tocsr() + + # Note that `distance_matrix` is manipulated in-place, however we do not + # need it for anything else past this point, hence the operation is safe. + mutual_reachability_ = mutual_reachability_graph( + distance_matrix, min_samples=min_samples, max_distance=max_distance + ) + min_spanning_tree = _brute_mst(mutual_reachability_, min_samples=min_samples) + # Warn if the MST couldn't be constructed around the missing distances + if np.isinf(min_spanning_tree["distance"]).any(): + warn( + ( + "The minimum spanning tree contains edge weights with value " + "infinity. Potentially, you are missing too many distances " + "in the initial distance matrix for the given neighborhood " + "size." + ), + UserWarning, + ) + return _process_mst(min_spanning_tree) + + +def _hdbscan_prims( + X, + algo, + min_samples=5, + alpha=1.0, + metric="euclidean", + leaf_size=40, + n_jobs=None, + **metric_params, +): + """ + Builds a single-linkage tree (SLT) from the input data `X`. If + `metric="precomputed"` then `X` must be a symmetric array of distances. + Otherwise, the pairwise distances are calculated directly and passed to + `mutual_reachability_graph`. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The raw data. + + min_samples : int, default=None + The number of samples in a neighborhood for a point + to be considered as a core point. This includes the point itself. + + alpha : float, default=1.0 + A distance scaling parameter as used in robust single linkage. + + metric : str or callable, default='euclidean' + The metric to use when calculating distance between instances in a + feature array. `metric` must be one of the options allowed by + :func:`~sklearn.metrics.pairwise_distances` for its metric + parameter. + + n_jobs : int, default=None + The number of jobs to use for computing the pairwise distances. This + works by breaking down the pairwise matrix into n_jobs even slices and + computing them in parallel. 
This parameter is passed directly to + :func:`~sklearn.metrics.pairwise_distances`. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + copy : bool, default=False + If `copy=True` then any time an in-place modifications would be made + that would overwrite `X`, a copy will first be made, guaranteeing that + the original data will be unchanged. Currently, it only applies when + `metric="precomputed"`, when passing a dense array or a CSR sparse + array/matrix. + + metric_params : dict, default=None + Arguments passed to the distance metric. + + Returns + ------- + single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype + The single-linkage tree tree (dendrogram) built from the MST. + """ + # The Cython routines used require contiguous arrays + X = np.asarray(X, order="C") + + # Get distance to kth nearest neighbour + nbrs = NearestNeighbors( + n_neighbors=min_samples, + algorithm=algo, + leaf_size=leaf_size, + metric=metric, + metric_params=metric_params, + n_jobs=n_jobs, + p=None, + ).fit(X) + + neighbors_distances, _ = nbrs.kneighbors(X, min_samples, return_distance=True) + core_distances = np.ascontiguousarray(neighbors_distances[:, -1]) + dist_metric = DistanceMetric.get_metric(metric, **metric_params) + + # Mutual reachability distance is implicit in mst_from_data_matrix + min_spanning_tree = mst_from_data_matrix(X, core_distances, dist_metric, alpha) + return _process_mst(min_spanning_tree) + + +def remap_single_linkage_tree(tree, internal_to_raw, non_finite): + """ + Takes an internal single_linkage_tree structure and adds back in a set of points + that were initially detected as non-finite and returns that new tree. + These points will all be merged into the final node at np.inf distance and + considered noise points. + + Parameters + ---------- + tree : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype + The single-linkage tree tree (dendrogram) built from the MST. 
+ internal_to_raw: dict + A mapping from internal integer index to the raw integer index + non_finite : ndarray + Boolean array of which entries in the raw data are non-finite + """ + finite_count = len(internal_to_raw) + + outlier_count = len(non_finite) + for i, _ in enumerate(tree): + left = tree[i]["left_node"] + right = tree[i]["right_node"] + + if left < finite_count: + tree[i]["left_node"] = internal_to_raw[left] + else: + tree[i]["left_node"] = left + outlier_count + if right < finite_count: + tree[i]["right_node"] = internal_to_raw[right] + else: + tree[i]["right_node"] = right + outlier_count + + outlier_tree = np.zeros(len(non_finite), dtype=HIERARCHY_dtype) + last_cluster_id = max( + tree[tree.shape[0] - 1]["left_node"], tree[tree.shape[0] - 1]["right_node"] + ) + last_cluster_size = tree[tree.shape[0] - 1]["cluster_size"] + for i, outlier in enumerate(non_finite): + outlier_tree[i] = (outlier, last_cluster_id + 1, np.inf, last_cluster_size + 1) + last_cluster_id += 1 + last_cluster_size += 1 + tree = np.concatenate([tree, outlier_tree]) + return tree + + +def _get_finite_row_indices(matrix): + """ + Returns the indices of the purely finite rows of a + sparse matrix or dense ndarray + """ + if issparse(matrix): + row_indices = np.array( + [i for i, row in enumerate(matrix.tolil().data) if np.all(np.isfinite(row))] + ) + else: + (row_indices,) = np.isfinite(matrix.sum(axis=1)).nonzero() + return row_indices + + +class HDBSCAN(ClusterMixin, BaseEstimator): + """Cluster data using hierarchical density-based clustering. + + HDBSCAN - Hierarchical Density-Based Spatial Clustering of Applications + with Noise. Performs :class:`~sklearn.cluster.DBSCAN` over varying epsilon + values and integrates the result to find a clustering that gives the best + stability over epsilon. + This allows HDBSCAN to find clusters of varying densities (unlike + :class:`~sklearn.cluster.DBSCAN`), and be more robust to parameter selection. + Read more in the :ref:`User Guide `. + + For an example of how to use HDBSCAN, as well as a comparison to + :class:`~sklearn.cluster.DBSCAN`, please see the :ref:`plotting demo + `. + + .. versionadded:: 1.3 + + Parameters + ---------- + min_cluster_size : int, default=5 + The minimum number of samples in a group for that group to be + considered a cluster; groupings smaller than this size will be left + as noise. + + min_samples : int, default=None + The number of samples in a neighborhood for a point + to be considered as a core point. This includes the point itself. + When `None`, defaults to `min_cluster_size`. + + cluster_selection_epsilon : float, default=0.0 + A distance threshold. Clusters below this value will be merged. + See [5]_ for more information. + + max_cluster_size : int, default=None + A limit to the size of clusters returned by the `"eom"` cluster + selection algorithm. There is no limit when `max_cluster_size=None`. + Has no effect if `cluster_selection_method="leaf"`. + + metric : str or callable, default='euclidean' + The metric to use when calculating distance between instances in a + feature array. + + - If metric is a string or callable, it must be one of + the options allowed by :func:`~sklearn.metrics.pairwise_distances` + for its metric parameter. + + - If metric is "precomputed", X is assumed to be a distance matrix and + must be square. + + metric_params : dict, default=None + Arguments passed to the distance metric. + + alpha : float, default=1.0 + A distance scaling parameter as used in robust single linkage. 
+ See [3]_ for more information. + + algorithm : {"auto", "brute", "kd_tree", "ball_tree"}, default="auto" + Exactly which algorithm to use for computing core distances; By default + this is set to `"auto"` which attempts to use a + :class:`~sklearn.neighbors.KDTree` tree if possible, otherwise it uses + a :class:`~sklearn.neighbors.BallTree` tree. Both `"kd_tree"` and + `"ball_tree"` algorithms use the + :class:`~sklearn.neighbors.NearestNeighbors` estimator. + + If the `X` passed during `fit` is sparse or `metric` is invalid for + both :class:`~sklearn.neighbors.KDTree` and + :class:`~sklearn.neighbors.BallTree`, then it resolves to use the + `"brute"` algorithm. + + .. deprecated:: 1.4 + The `'kdtree'` option was deprecated in version 1.4, + and will be renamed to `'kd_tree'` in 1.6. + + .. deprecated:: 1.4 + The `'balltree'` option was deprecated in version 1.4, + and will be renamed to `'ball_tree'` in 1.6. + + leaf_size : int, default=40 + Leaf size for trees responsible for fast nearest neighbour queries when + a KDTree or a BallTree are used as core-distance algorithms. A large + dataset size and small `leaf_size` may induce excessive memory usage. + If you are running out of memory consider increasing the `leaf_size` + parameter. Ignored for `algorithm="brute"`. + + n_jobs : int, default=None + Number of jobs to run in parallel to calculate distances. + `None` means 1 unless in a :obj:`joblib.parallel_backend` context. + `-1` means using all processors. See :term:`Glossary ` + for more details. + + cluster_selection_method : {"eom", "leaf"}, default="eom" + The method used to select clusters from the condensed tree. The + standard approach for HDBSCAN* is to use an Excess of Mass (`"eom"`) + algorithm to find the most persistent clusters. Alternatively you can + instead select the clusters at the leaves of the tree -- this provides + the most fine grained and homogeneous clusters. + + allow_single_cluster : bool, default=False + By default HDBSCAN* will not produce a single cluster, setting this + to True will override this and allow single cluster results in + the case that you feel this is a valid result for your dataset. + + store_centers : str, default=None + Which, if any, cluster centers to compute and store. The options are: + + - `None` which does not compute nor store any centers. + - `"centroid"` which calculates the center by taking the weighted + average of their positions. Note that the algorithm uses the + euclidean metric and does not guarantee that the output will be + an observed data point. + - `"medoid"` which calculates the center by taking the point in the + fitted data which minimizes the distance to all other points in + the cluster. This is slower than "centroid" since it requires + computing additional pairwise distances between points of the + same cluster but guarantees the output is an observed data point. + The medoid is also well-defined for arbitrary metrics, and does not + depend on a euclidean metric. + - `"both"` which computes and stores both forms of centers. + + copy : bool, default=False + If `copy=True` then any time an in-place modifications would be made + that would overwrite data passed to :term:`fit`, a copy will first be + made, guaranteeing that the original data will be unchanged. + Currently, it only applies when `metric="precomputed"`, when passing + a dense array or a CSR sparse matrix and when `algorithm="brute"`. 
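As a quick illustration of the `store_centers` option above (an editor's sketch with assumed synthetic blobs, not part of the shipped docstring), the resulting center arrays are described in the Attributes section just below:

```python
# Editor's sketch (assumed synthetic blobs): the `store_centers` option above.
from sklearn.cluster import HDBSCAN
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=300, centers=3, cluster_std=0.5, random_state=0)
hdb = HDBSCAN(min_cluster_size=15, store_centers="both").fit(X)

# One row per non-outlier cluster: centroids_ are weighted Euclidean means,
# medoids_ are actual observed points minimizing within-cluster distances.
print(hdb.centroids_.shape, hdb.medoids_.shape)
```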
+ + Attributes + ---------- + labels_ : ndarray of shape (n_samples,) + Cluster labels for each point in the dataset given to :term:`fit`. + Outliers are labeled as follows: + + - Noisy samples are given the label -1. + - Samples with infinite elements (+/- np.inf) are given the label -2. + - Samples with missing data are given the label -3, even if they + also have infinite elements. + + probabilities_ : ndarray of shape (n_samples,) + The strength with which each sample is a member of its assigned + cluster. + + - Clustered samples have probabilities proportional to the degree that + they persist as part of the cluster. + - Noisy samples have probability zero. + - Samples with infinite elements (+/- np.inf) have probability 0. + - Samples with missing data have probability `np.nan`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + centroids_ : ndarray of shape (n_clusters, n_features) + A collection containing the centroid of each cluster calculated under + the standard euclidean metric. The centroids may fall "outside" their + respective clusters if the clusters themselves are non-convex. + + Note that `n_clusters` only counts non-outlier clusters. That is to + say, the `-1, -2, -3` labels for the outlier clusters are excluded. + + medoids_ : ndarray of shape (n_clusters, n_features) + A collection containing the medoid of each cluster calculated under + the whichever metric was passed to the `metric` parameter. The + medoids are points in the original cluster which minimize the average + distance to all other points in that cluster under the chosen metric. + These can be thought of as the result of projecting the `metric`-based + centroid back onto the cluster. + + Note that `n_clusters` only counts non-outlier clusters. That is to + say, the `-1, -2, -3` labels for the outlier clusters are excluded. + + See Also + -------- + DBSCAN : Density-Based Spatial Clustering of Applications + with Noise. + OPTICS : Ordering Points To Identify the Clustering Structure. + Birch : Memory-efficient, online-learning algorithm. + + References + ---------- + + .. [1] :doi:`Campello, R. J., Moulavi, D., & Sander, J. Density-based clustering + based on hierarchical density estimates. + <10.1007/978-3-642-37456-2_14>` + .. [2] :doi:`Campello, R. J., Moulavi, D., Zimek, A., & Sander, J. + Hierarchical density estimates for data clustering, visualization, + and outlier detection.<10.1145/2733381>` + + .. [3] `Chaudhuri, K., & Dasgupta, S. Rates of convergence for the + cluster tree. + `_ + + .. [4] `Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and + Sander, J. Density-Based Clustering Validation. + `_ + + .. [5] :arxiv:`Malzer, C., & Baum, M. "A Hybrid Approach To Hierarchical + Density-based Cluster Selection."<1911.02282>`. 
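Complementing the usage example below, a hedged editor's sketch of the outlier label encoding documented in the Attributes section above (synthetic data; the labels of the finite points themselves will vary):

```python
# Editor's sketch (synthetic data): the outlier label encoding used by HDBSCAN.
import numpy as np
from sklearn.cluster import HDBSCAN
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=200, centers=2, cluster_std=0.4, random_state=0)
X[0, 0] = np.inf   # row with an infinite element
X[1, 0] = np.nan   # row with a missing element

hdb = HDBSCAN(min_cluster_size=10).fit(X)
# Per the Attributes documented above: rows with infinite elements are labeled
# -2 with probability 0; rows with missing values are labeled -3 with
# probability NaN.
print(hdb.labels_[:2], hdb.probabilities_[:2])
```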
+ + Examples + -------- + >>> from sklearn.cluster import HDBSCAN + >>> from sklearn.datasets import load_digits + >>> X, _ = load_digits(return_X_y=True) + >>> hdb = HDBSCAN(min_cluster_size=20) + >>> hdb.fit(X) + HDBSCAN(min_cluster_size=20) + >>> hdb.labels_ + array([ 2, 6, -1, ..., -1, -1, -1]) + """ + + _parameter_constraints = { + "min_cluster_size": [Interval(Integral, left=2, right=None, closed="left")], + "min_samples": [Interval(Integral, left=1, right=None, closed="left"), None], + "cluster_selection_epsilon": [ + Interval(Real, left=0, right=None, closed="left") + ], + "max_cluster_size": [ + None, + Interval(Integral, left=1, right=None, closed="left"), + ], + "metric": [StrOptions(FAST_METRICS | {"precomputed"}), callable], + "metric_params": [dict, None], + "alpha": [Interval(Real, left=0, right=None, closed="neither")], + # TODO(1.6): Remove "kdtree" and "balltree" option + "algorithm": [ + StrOptions( + {"auto", "brute", "kd_tree", "ball_tree", "kdtree", "balltree"}, + deprecated={"kdtree", "balltree"}, + ), + ], + "leaf_size": [Interval(Integral, left=1, right=None, closed="left")], + "n_jobs": [Integral, None], + "cluster_selection_method": [StrOptions({"eom", "leaf"})], + "allow_single_cluster": ["boolean"], + "store_centers": [None, StrOptions({"centroid", "medoid", "both"})], + "copy": ["boolean"], + } + + def __init__( + self, + min_cluster_size=5, + min_samples=None, + cluster_selection_epsilon=0.0, + max_cluster_size=None, + metric="euclidean", + metric_params=None, + alpha=1.0, + algorithm="auto", + leaf_size=40, + n_jobs=None, + cluster_selection_method="eom", + allow_single_cluster=False, + store_centers=None, + copy=False, + ): + self.min_cluster_size = min_cluster_size + self.min_samples = min_samples + self.alpha = alpha + self.max_cluster_size = max_cluster_size + self.cluster_selection_epsilon = cluster_selection_epsilon + self.metric = metric + self.metric_params = metric_params + self.algorithm = algorithm + self.leaf_size = leaf_size + self.n_jobs = n_jobs + self.cluster_selection_method = cluster_selection_method + self.allow_single_cluster = allow_single_cluster + self.store_centers = store_centers + self.copy = copy + + @_fit_context( + # HDBSCAN.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Find clusters based on hierarchical density-based clustering. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), or \ + ndarray of shape (n_samples, n_samples) + A feature array, or array of distances between samples if + `metric='precomputed'`. + + y : None + Ignored. + + Returns + ------- + self : object + Returns self. + """ + if self.metric == "precomputed" and self.store_centers is not None: + raise ValueError( + "Cannot store centers when using a precomputed distance matrix." + ) + + self._metric_params = self.metric_params or {} + if self.metric != "precomputed": + # Non-precomputed matrices may contain non-finite values. + X = self._validate_data( + X, + accept_sparse=["csr", "lil"], + force_all_finite=False, + dtype=np.float64, + ) + self._raw_data = X + all_finite = True + try: + _assert_all_finite(X.data if issparse(X) else X) + except ValueError: + all_finite = False + + if not all_finite: + # Pass only the purely finite indices into hdbscan + # We will later assign all non-finite points their + # corresponding labels, as specified in `_OUTLIER_ENCODING` + + # Reduce X to make the checks for missing/outlier samples more + # convenient. 
+ reduced_X = X.sum(axis=1) + + # Samples with missing data are denoted by the presence of + # `np.nan` + missing_index = np.isnan(reduced_X).nonzero()[0] + + # Outlier samples are denoted by the presence of `np.inf` + infinite_index = np.isinf(reduced_X).nonzero()[0] + + # Continue with only finite samples + finite_index = _get_finite_row_indices(X) + internal_to_raw = {x: y for x, y in enumerate(finite_index)} + X = X[finite_index] + elif issparse(X): + # Handle sparse precomputed distance matrices separately + X = self._validate_data( + X, + accept_sparse=["csr", "lil"], + dtype=np.float64, + ) + else: + # Only non-sparse, precomputed distance matrices are handled here + # and thereby allowed to contain numpy.inf for missing distances + + # Perform data validation after removing infinite values (numpy.inf) + # from the given distance matrix. + X = self._validate_data(X, force_all_finite=False, dtype=np.float64) + if np.isnan(X).any(): + # TODO: Support np.nan in Cython implementation for precomputed + # dense HDBSCAN + raise ValueError("np.nan values found in precomputed-dense") + if X.shape[0] == 1: + raise ValueError("n_samples=1 while HDBSCAN requires more than one sample") + self._min_samples = ( + self.min_cluster_size if self.min_samples is None else self.min_samples + ) + + if self._min_samples > X.shape[0]: + raise ValueError( + f"min_samples ({self._min_samples}) must be at most the number of" + f" samples in X ({X.shape[0]})" + ) + + # TODO(1.6): Remove + if self.algorithm == "kdtree": + warn( + ( + "`algorithm='kdtree'`has been deprecated in 1.4 and will be renamed" + " to'kd_tree'`in 1.6. To keep the past behaviour, set" + " `algorithm='kd_tree'`." + ), + FutureWarning, + ) + self.algorithm = "kd_tree" + + # TODO(1.6): Remove + if self.algorithm == "balltree": + warn( + ( + "`algorithm='balltree'`has been deprecated in 1.4 and will be" + " renamed to'ball_tree'`in 1.6. To keep the past behaviour, set" + " `algorithm='ball_tree'`." + ), + FutureWarning, + ) + self.algorithm = "ball_tree" + + mst_func = None + kwargs = dict( + X=X, + min_samples=self._min_samples, + alpha=self.alpha, + metric=self.metric, + n_jobs=self.n_jobs, + **self._metric_params, + ) + if self.algorithm == "kd_tree" and self.metric not in KDTree.valid_metrics: + raise ValueError( + f"{self.metric} is not a valid metric for a KDTree-based algorithm." + " Please select a different metric." + ) + elif ( + self.algorithm == "ball_tree" and self.metric not in BallTree.valid_metrics + ): + raise ValueError( + f"{self.metric} is not a valid metric for a BallTree-based algorithm." + " Please select a different metric." + ) + + if self.algorithm != "auto": + if ( + self.metric != "precomputed" + and issparse(X) + and self.algorithm != "brute" + ): + raise ValueError("Sparse data matrices only support algorithm `brute`.") + + if self.algorithm == "brute": + mst_func = _hdbscan_brute + kwargs["copy"] = self.copy + elif self.algorithm == "kd_tree": + mst_func = _hdbscan_prims + kwargs["algo"] = "kd_tree" + kwargs["leaf_size"] = self.leaf_size + else: + mst_func = _hdbscan_prims + kwargs["algo"] = "ball_tree" + kwargs["leaf_size"] = self.leaf_size + else: + if issparse(X) or self.metric not in FAST_METRICS: + # We can't do much with sparse matrices ... 
+ mst_func = _hdbscan_brute + kwargs["copy"] = self.copy + elif self.metric in KDTree.valid_metrics: + # TODO: Benchmark KD vs Ball Tree efficiency + mst_func = _hdbscan_prims + kwargs["algo"] = "kd_tree" + kwargs["leaf_size"] = self.leaf_size + else: + # Metric is a valid BallTree metric + mst_func = _hdbscan_prims + kwargs["algo"] = "ball_tree" + kwargs["leaf_size"] = self.leaf_size + + self._single_linkage_tree_ = mst_func(**kwargs) + + self.labels_, self.probabilities_ = tree_to_labels( + self._single_linkage_tree_, + self.min_cluster_size, + self.cluster_selection_method, + self.allow_single_cluster, + self.cluster_selection_epsilon, + self.max_cluster_size, + ) + if self.metric != "precomputed" and not all_finite: + # Remap indices to align with original data in the case of + # non-finite entries. Samples with np.inf are mapped to -1 and + # those with np.nan are mapped to -2. + self._single_linkage_tree_ = remap_single_linkage_tree( + self._single_linkage_tree_, + internal_to_raw, + # There may be overlap for points w/ both `np.inf` and `np.nan` + non_finite=set(np.hstack([infinite_index, missing_index])), + ) + new_labels = np.empty(self._raw_data.shape[0], dtype=np.int32) + new_labels[finite_index] = self.labels_ + new_labels[infinite_index] = _OUTLIER_ENCODING["infinite"]["label"] + new_labels[missing_index] = _OUTLIER_ENCODING["missing"]["label"] + self.labels_ = new_labels + + new_probabilities = np.zeros(self._raw_data.shape[0], dtype=np.float64) + new_probabilities[finite_index] = self.probabilities_ + # Infinite outliers have probability 0 by convention, though this + # is arbitrary. + new_probabilities[infinite_index] = _OUTLIER_ENCODING["infinite"]["prob"] + new_probabilities[missing_index] = _OUTLIER_ENCODING["missing"]["prob"] + self.probabilities_ = new_probabilities + + if self.store_centers: + self._weighted_cluster_center(X) + return self + + def fit_predict(self, X, y=None): + """Cluster X and return the associated cluster labels. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), or \ + ndarray of shape (n_samples, n_samples) + A feature array, or array of distances between samples if + `metric='precomputed'`. + + y : None + Ignored. + + Returns + ------- + y : ndarray of shape (n_samples,) + Cluster labels. + """ + self.fit(X) + return self.labels_ + + def _weighted_cluster_center(self, X): + """Calculate and store the centroids/medoids of each cluster. + + This requires `X` to be a raw feature array, not precomputed + distances. Rather than return outputs directly, this helper method + instead stores them in the `self.{centroids, medoids}_` attributes. + The choice for which attributes are calculated and stored is mediated + by the value of `self.store_centers`. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The feature array that the estimator was fit with. + + """ + # Number of non-noise clusters + n_clusters = len(set(self.labels_) - {-1, -2}) + mask = np.empty((X.shape[0],), dtype=np.bool_) + make_centroids = self.store_centers in ("centroid", "both") + make_medoids = self.store_centers in ("medoid", "both") + + if make_centroids: + self.centroids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64) + if make_medoids: + self.medoids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64) + + # Need to handle iteratively seen each cluster may have a different + # number of samples, hence we can't create a homogeneous 3D array. 
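For reference, a standalone NumPy sketch of the probability-weighted centroid and medoid computation performed per cluster in the loop that follows (function and variable names are illustrative, not part of the estimator):

```python
# Sketch of the per-cluster center computations described above.
import numpy as np
from sklearn.metrics import pairwise_distances

def weighted_centers(data, strength, metric="euclidean"):
    # data: (n, d) members of one cluster; strength: (n,) membership probabilities
    centroid = np.average(data, weights=strength, axis=0)       # may lie outside the cluster
    dist = pairwise_distances(data, metric=metric) * strength   # weight by target-point strength
    medoid = data[np.argmin(dist.sum(axis=1))]                  # always an observed point
    return centroid, medoid
```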
+ for idx in range(n_clusters): + mask = self.labels_ == idx + data = X[mask] + strength = self.probabilities_[mask] + if make_centroids: + self.centroids_[idx] = np.average(data, weights=strength, axis=0) + if make_medoids: + # TODO: Implement weighted argmin PWD backend + dist_mat = pairwise_distances( + data, metric=self.metric, **self._metric_params + ) + dist_mat = dist_mat * strength + medoid_index = np.argmin(dist_mat.sum(axis=1)) + self.medoids_[idx] = data[medoid_index] + return + + def dbscan_clustering(self, cut_distance, min_cluster_size=5): + """Return clustering given by DBSCAN without border points. + + Return clustering that would be equivalent to running DBSCAN* for a + particular cut_distance (or epsilon) DBSCAN* can be thought of as + DBSCAN without the border points. As such these results may differ + slightly from `cluster.DBSCAN` due to the difference in implementation + over the non-core points. + + This can also be thought of as a flat clustering derived from constant + height cut through the single linkage tree. + + This represents the result of selecting a cut value for robust single linkage + clustering. The `min_cluster_size` allows the flat clustering to declare noise + points (and cluster smaller than `min_cluster_size`). + + Parameters + ---------- + cut_distance : float + The mutual reachability distance cut value to use to generate a + flat clustering. + + min_cluster_size : int, default=5 + Clusters smaller than this value with be called 'noise' and remain + unclustered in the resulting flat clustering. + + Returns + ------- + labels : ndarray of shape (n_samples,) + An array of cluster labels, one per datapoint. + Outliers are labeled as follows: + + - Noisy samples are given the label -1. + - Samples with infinite elements (+/- np.inf) are given the label -2. + - Samples with missing data are given the label -3, even if they + also have infinite elements. 
+ """ + labels = labelling_at_cut( + self._single_linkage_tree_, cut_distance, min_cluster_size + ) + # Infer indices from labels generated during `fit` + infinite_index = self.labels_ == _OUTLIER_ENCODING["infinite"]["label"] + missing_index = self.labels_ == _OUTLIER_ENCODING["missing"]["label"] + + # Overwrite infinite/missing outlier samples (otherwise simple noise) + labels[infinite_index] = _OUTLIER_ENCODING["infinite"]["label"] + labels[missing_index] = _OUTLIER_ENCODING["missing"]["label"] + return labels + + def _more_tags(self): + return {"allow_nan": self.metric != "precomputed"} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7553248a0c603020f127973a9ee91caa0fe4e22a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/test_reachibility.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/test_reachibility.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43acb41b058cd096d3eb6b9f843b267ba229e522 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/test_reachibility.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/test_reachibility.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/test_reachibility.py new file mode 100644 index 0000000000000000000000000000000000000000..53096dd7cbec7953e19018e6aeca4e6027c2625b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/test_reachibility.py @@ -0,0 +1,63 @@ +import numpy as np +import pytest + +from sklearn.cluster._hdbscan._reachability import mutual_reachability_graph +from sklearn.utils._testing import ( + _convert_container, + assert_allclose, +) + + +def test_mutual_reachability_graph_error_sparse_format(): + """Check that we raise an error if the sparse format is not CSR.""" + rng = np.random.RandomState(0) + X = rng.randn(10, 10) + X = X.T @ X + np.fill_diagonal(X, 0.0) + X = _convert_container(X, "sparse_csc") + + err_msg = "Only sparse CSR matrices are supported" + with pytest.raises(ValueError, match=err_msg): + mutual_reachability_graph(X) + + +@pytest.mark.parametrize("array_type", ["array", "sparse_csr"]) +def test_mutual_reachability_graph_inplace(array_type): + """Check that the operation is happening inplace.""" + rng = np.random.RandomState(0) + X = rng.randn(10, 10) + X = X.T @ X + np.fill_diagonal(X, 0.0) + X = _convert_container(X, array_type) + + mr_graph = mutual_reachability_graph(X) + + assert id(mr_graph) == id(X) + + +def test_mutual_reachability_graph_equivalence_dense_sparse(): + """Check that we get the same results for dense and sparse implementation.""" + rng = np.random.RandomState(0) + X = rng.randn(5, 5) + X_dense = X.T 
@ X + X_sparse = _convert_container(X_dense, "sparse_csr") + + mr_graph_dense = mutual_reachability_graph(X_dense, min_samples=3) + mr_graph_sparse = mutual_reachability_graph(X_sparse, min_samples=3) + + assert_allclose(mr_graph_dense, mr_graph_sparse.toarray()) + + +@pytest.mark.parametrize("array_type", ["array", "sparse_csr"]) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_mutual_reachability_graph_preserve_dtype(array_type, dtype): + """Check that the computation preserve dtype thanks to fused types.""" + rng = np.random.RandomState(0) + X = rng.randn(10, 10) + X = (X.T @ X).astype(dtype) + np.fill_diagonal(X, 0.0) + X = _convert_container(X, array_type) + + assert X.dtype == dtype + mr_graph = mutual_reachability_graph(X) + assert mr_graph.dtype == dtype diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..67487cccea374a3bd124ba2166c1e7d7c363e4b4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.pxd new file mode 100644 index 0000000000000000000000000000000000000000..a10f8c12f34402c872ccc3bd7c14266dcc9b5e7a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.pxd @@ -0,0 +1,9 @@ +from ..utils._typedefs cimport intp_t + +cdef class UnionFind: + cdef intp_t next_label + cdef intp_t[:] parent + cdef intp_t[:] size + + cdef void union(self, intp_t m, intp_t n) noexcept + cdef intp_t fast_find(self, intp_t n) noexcept diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..54c057631c1b32be60da76c17e56bc71ddb90d29 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.pxd new file mode 100644 index 0000000000000000000000000000000000000000..9a41ea68d1bafc0cad55c028e0413e463ddb6d2e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.pxd @@ -0,0 +1,48 @@ +from cython cimport floating + + +cdef floating _euclidean_dense_dense( + const floating*, + const floating*, + int, + bint +) noexcept nogil + +cdef floating _euclidean_sparse_dense( + const floating[::1], + const int[::1], + const floating[::1], + floating, + bint +) noexcept nogil + +cpdef void _relocate_empty_clusters_dense( + const floating[:, ::1], + const floating[::1], + const floating[:, ::1], + floating[:, ::1], + floating[::1], + const int[::1] +) + +cpdef void _relocate_empty_clusters_sparse( + const floating[::1], + const int[::1], + const int[::1], + const floating[::1], + const floating[:, ::1], + floating[:, ::1], + floating[::1], + const int[::1] +) + +cdef void _average_centers( + floating[:, ::1], + const floating[::1] +) + +cdef void 
_center_shift( + const floating[:, ::1], + const floating[:, ::1], + floating[::1] +) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_elkan.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_elkan.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..175a86ac7952c024ac29981bba5f8b26def1d51c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_elkan.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_lloyd.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_lloyd.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e028fd42843dfb803237c8284bd508654a7de8c4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_lloyd.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_minibatch.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_minibatch.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ce010c7e3fef2c62e9e0604d8ddd9f111df630f3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_k_means_minibatch.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_kmeans.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_kmeans.py new file mode 100644 index 0000000000000000000000000000000000000000..178242e60be578b9244df87692bd1f74fa4a49a4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_kmeans.py @@ -0,0 +1,2318 @@ +"""K-means clustering.""" + +# Authors: Gael Varoquaux +# Thomas Rueckstiess +# James Bergstra +# Jan Schlueter +# Nelle Varoquaux +# Peter Prettenhofer +# Olivier Grisel +# Mathieu Blondel +# Robert Layton +# License: BSD 3 clause + +import warnings +from abc import ABC, abstractmethod +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + ClusterMixin, + TransformerMixin, + _fit_context, +) +from ..exceptions import ConvergenceWarning +from ..metrics.pairwise import _euclidean_distances, euclidean_distances +from ..utils import check_array, check_random_state +from ..utils._openmp_helpers import _openmp_effective_n_threads +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.extmath import row_norms, stable_cumsum +from ..utils.fixes import threadpool_info, threadpool_limits +from ..utils.sparsefuncs import mean_variance_axis +from ..utils.sparsefuncs_fast import assign_rows_csr +from ..utils.validation import ( + _check_sample_weight, + _is_arraylike_not_scalar, + check_is_fitted, +) +from ._k_means_common import ( + CHUNK_SIZE, + _inertia_dense, + _inertia_sparse, + _is_same_clustering, +) +from ._k_means_elkan import ( + elkan_iter_chunked_dense, + elkan_iter_chunked_sparse, + init_bounds_dense, + init_bounds_sparse, +) +from ._k_means_lloyd import lloyd_iter_chunked_dense, lloyd_iter_chunked_sparse +from ._k_means_minibatch import _minibatch_update_dense, _minibatch_update_sparse + +############################################################################### +# Initialization heuristic + 
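Before the `kmeans_plusplus` helper that follows, a compact sketch of the basic D²-weighted seeding idea it implements; this is the plain (non-greedy) variant, with illustrative names and a dense-array assumption:

```python
# Minimal, non-greedy k-means++ seeding sketch (dense data, uniform sample weights).
import numpy as np

def dsquared_seeding(X, n_clusters, rng=None):
    rng = rng or np.random.RandomState(0)
    centers = [X[rng.randint(X.shape[0])]]          # first seed chosen uniformly at random
    for _ in range(1, n_clusters):
        diff = X[:, None, :] - np.asarray(centers)[None, :, :]
        d2 = (diff ** 2).sum(-1).min(axis=1)        # squared distance to the closest seed
        centers.append(X[rng.choice(X.shape[0], p=d2 / d2.sum())])  # D^2 weighting
    return np.asarray(centers)
```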
+ +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "n_clusters": [Interval(Integral, 1, None, closed="left")], + "sample_weight": ["array-like", None], + "x_squared_norms": ["array-like", None], + "random_state": ["random_state"], + "n_local_trials": [Interval(Integral, 1, None, closed="left"), None], + }, + prefer_skip_nested_validation=True, +) +def kmeans_plusplus( + X, + n_clusters, + *, + sample_weight=None, + x_squared_norms=None, + random_state=None, + n_local_trials=None, +): + """Init n_clusters seeds according to k-means++. + + .. versionadded:: 0.24 + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to pick seeds from. + + n_clusters : int + The number of centroids to initialize. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in `X`. If `None`, all observations + are assigned equal weight. `sample_weight` is ignored if `init` + is a callable or a user provided array. + + .. versionadded:: 1.3 + + x_squared_norms : array-like of shape (n_samples,), default=None + Squared Euclidean norm of each data point. + + random_state : int or RandomState instance, default=None + Determines random number generation for centroid initialization. Pass + an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + n_local_trials : int, default=None + The number of seeding trials for each center (except the first), + of which the one reducing inertia the most is greedily chosen. + Set to None to make the number of trials depend logarithmically + on the number of seeds (2+log(k)) which is the recommended setting. + Setting to 1 disables the greedy cluster selection and recovers the + vanilla k-means++ algorithm which was empirically shown to work less + well than its greedy variant. + + Returns + ------- + centers : ndarray of shape (n_clusters, n_features) + The initial centers for k-means. + + indices : ndarray of shape (n_clusters,) + The index location of the chosen centers in the data array X. For a + given index and center, X[index] = center. + + Notes + ----- + Selects initial cluster centers for k-mean clustering in a smart way + to speed up convergence. see: Arthur, D. and Vassilvitskii, S. + "k-means++: the advantages of careful seeding". ACM-SIAM symposium + on Discrete algorithms. 2007 + + Examples + -------- + + >>> from sklearn.cluster import kmeans_plusplus + >>> import numpy as np + >>> X = np.array([[1, 2], [1, 4], [1, 0], + ... [10, 2], [10, 4], [10, 0]]) + >>> centers, indices = kmeans_plusplus(X, n_clusters=2, random_state=0) + >>> centers + array([[10, 2], + [ 1, 0]]) + >>> indices + array([3, 2]) + """ + # Check data + check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32]) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + if X.shape[0] < n_clusters: + raise ValueError( + f"n_samples={X.shape[0]} should be >= n_clusters={n_clusters}." + ) + + # Check parameters + if x_squared_norms is None: + x_squared_norms = row_norms(X, squared=True) + else: + x_squared_norms = check_array(x_squared_norms, dtype=X.dtype, ensure_2d=False) + + if x_squared_norms.shape[0] != X.shape[0]: + raise ValueError( + f"The length of x_squared_norms {x_squared_norms.shape[0]} should " + f"be equal to the length of n_samples {X.shape[0]}." 
+ ) + + random_state = check_random_state(random_state) + + # Call private k-means++ + centers, indices = _kmeans_plusplus( + X, n_clusters, x_squared_norms, sample_weight, random_state, n_local_trials + ) + + return centers, indices + + +def _kmeans_plusplus( + X, n_clusters, x_squared_norms, sample_weight, random_state, n_local_trials=None +): + """Computational component for initialization of n_clusters by + k-means++. Prior validation of data is assumed. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The data to pick seeds for. + + n_clusters : int + The number of seeds to choose. + + sample_weight : ndarray of shape (n_samples,) + The weights for each observation in `X`. + + x_squared_norms : ndarray of shape (n_samples,) + Squared Euclidean norm of each data point. + + random_state : RandomState instance + The generator used to initialize the centers. + See :term:`Glossary `. + + n_local_trials : int, default=None + The number of seeding trials for each center (except the first), + of which the one reducing inertia the most is greedily chosen. + Set to None to make the number of trials depend logarithmically + on the number of seeds (2+log(k)); this is the default. + + Returns + ------- + centers : ndarray of shape (n_clusters, n_features) + The initial centers for k-means. + + indices : ndarray of shape (n_clusters,) + The index location of the chosen centers in the data array X. For a + given index and center, X[index] = center. + """ + n_samples, n_features = X.shape + + centers = np.empty((n_clusters, n_features), dtype=X.dtype) + + # Set the number of local seeding trials if none is given + if n_local_trials is None: + # This is what Arthur/Vassilvitskii tried, but did not report + # specific results for other than mentioning in the conclusion + # that it helped. 
+ n_local_trials = 2 + int(np.log(n_clusters)) + + # Pick first center randomly and track index of point + center_id = random_state.choice(n_samples, p=sample_weight / sample_weight.sum()) + indices = np.full(n_clusters, -1, dtype=int) + if sp.issparse(X): + centers[0] = X[[center_id]].toarray() + else: + centers[0] = X[center_id] + indices[0] = center_id + + # Initialize list of closest distances and calculate current potential + closest_dist_sq = _euclidean_distances( + centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms, squared=True + ) + current_pot = closest_dist_sq @ sample_weight + + # Pick the remaining n_clusters-1 points + for c in range(1, n_clusters): + # Choose center candidates by sampling with probability proportional + # to the squared distance to the closest existing center + rand_vals = random_state.uniform(size=n_local_trials) * current_pot + candidate_ids = np.searchsorted( + stable_cumsum(sample_weight * closest_dist_sq), rand_vals + ) + # XXX: numerical imprecision can result in a candidate_id out of range + np.clip(candidate_ids, None, closest_dist_sq.size - 1, out=candidate_ids) + + # Compute distances to center candidates + distance_to_candidates = _euclidean_distances( + X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True + ) + + # update closest distances squared and potential for each candidate + np.minimum(closest_dist_sq, distance_to_candidates, out=distance_to_candidates) + candidates_pot = distance_to_candidates @ sample_weight.reshape(-1, 1) + + # Decide which candidate is the best + best_candidate = np.argmin(candidates_pot) + current_pot = candidates_pot[best_candidate] + closest_dist_sq = distance_to_candidates[best_candidate] + best_candidate = candidate_ids[best_candidate] + + # Permanently add best center candidate found in local tries + if sp.issparse(X): + centers[c] = X[[best_candidate]].toarray() + else: + centers[c] = X[best_candidate] + indices[c] = best_candidate + + return centers, indices + + +############################################################################### +# K-means batch estimation by EM (expectation maximization) + + +def _tolerance(X, tol): + """Return a tolerance which is dependent on the dataset.""" + if tol == 0: + return 0 + if sp.issparse(X): + variances = mean_variance_axis(X, axis=0)[1] + else: + variances = np.var(X, axis=0) + return np.mean(variances) * tol + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "sample_weight": ["array-like", None], + "return_n_iter": [bool], + }, + prefer_skip_nested_validation=False, +) +def k_means( + X, + n_clusters, + *, + sample_weight=None, + init="k-means++", + n_init="auto", + max_iter=300, + verbose=False, + tol=1e-4, + random_state=None, + copy_x=True, + algorithm="lloyd", + return_n_iter=False, +): + """Perform K-means clustering algorithm. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The observations to cluster. It must be noted that the data + will be converted to C ordering, which will cause a memory copy + if the given data is not C-contiguous. + + n_clusters : int + The number of clusters to form as well as the number of + centroids to generate. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in `X`. If `None`, all observations + are assigned equal weight. `sample_weight` is not used during + initialization if `init` is a callable or a user provided array. 
+ + init : {'k-means++', 'random'}, callable or array-like of shape \ + (n_clusters, n_features), default='k-means++' + Method for initialization: + + - `'k-means++'` : selects initial cluster centers for k-mean + clustering in a smart way to speed up convergence. See section + Notes in k_init for more details. + - `'random'`: choose `n_clusters` observations (rows) at random from data + for the initial centroids. + - If an array is passed, it should be of shape `(n_clusters, n_features)` + and gives the initial centers. + - If a callable is passed, it should take arguments `X`, `n_clusters` and a + random state and return an initialization. + + n_init : 'auto' or int, default="auto" + Number of time the k-means algorithm will be run with different + centroid seeds. The final results will be the best output of + n_init consecutive runs in terms of inertia. + + When `n_init='auto'`, the number of runs depends on the value of init: + 10 if using `init='random'` or `init` is a callable; + 1 if using `init='k-means++'` or `init` is an array-like. + + .. versionadded:: 1.2 + Added 'auto' option for `n_init`. + + .. versionchanged:: 1.4 + Default value for `n_init` changed to `'auto'`. + + max_iter : int, default=300 + Maximum number of iterations of the k-means algorithm to run. + + verbose : bool, default=False + Verbosity mode. + + tol : float, default=1e-4 + Relative tolerance with regards to Frobenius norm of the difference + in the cluster centers of two consecutive iterations to declare + convergence. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for centroid initialization. Use + an int to make the randomness deterministic. + See :term:`Glossary `. + + copy_x : bool, default=True + When pre-computing distances it is more numerically accurate to center + the data first. If `copy_x` is True (default), then the original data is + not modified. If False, the original data is modified, and put back + before the function returns, but small numerical differences may be + introduced by subtracting and then adding the data mean. Note that if + the original data is not C-contiguous, a copy will be made even if + `copy_x` is False. If the original data is sparse, but not in CSR format, + a copy will be made even if `copy_x` is False. + + algorithm : {"lloyd", "elkan"}, default="lloyd" + K-means algorithm to use. The classical EM-style algorithm is `"lloyd"`. + The `"elkan"` variation can be more efficient on some datasets with + well-defined clusters, by using the triangle inequality. However it's + more memory intensive due to the allocation of an extra array of shape + `(n_samples, n_clusters)`. + + .. versionchanged:: 0.18 + Added Elkan algorithm + + .. versionchanged:: 1.1 + Renamed "full" to "lloyd", and deprecated "auto" and "full". + Changed "auto" to use "lloyd" instead of "elkan". + + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + Returns + ------- + centroid : ndarray of shape (n_clusters, n_features) + Centroids found at the last iteration of k-means. + + label : ndarray of shape (n_samples,) + The `label[i]` is the code or index of the centroid the + i'th observation is closest to. + + inertia : float + The final value of the inertia criterion (sum of squared distances to + the closest centroid for all observations in the training set). + + best_n_iter : int + Number of iterations corresponding to the best results. + Returned only if `return_n_iter` is set to True. 
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.cluster import k_means + >>> X = np.array([[1, 2], [1, 4], [1, 0], + ... [10, 2], [10, 4], [10, 0]]) + >>> centroid, label, inertia = k_means( + ... X, n_clusters=2, n_init="auto", random_state=0 + ... ) + >>> centroid + array([[10., 2.], + [ 1., 2.]]) + >>> label + array([1, 1, 1, 0, 0, 0], dtype=int32) + >>> inertia + 16.0 + """ + est = KMeans( + n_clusters=n_clusters, + init=init, + n_init=n_init, + max_iter=max_iter, + verbose=verbose, + tol=tol, + random_state=random_state, + copy_x=copy_x, + algorithm=algorithm, + ).fit(X, sample_weight=sample_weight) + if return_n_iter: + return est.cluster_centers_, est.labels_, est.inertia_, est.n_iter_ + else: + return est.cluster_centers_, est.labels_, est.inertia_ + + +def _kmeans_single_elkan( + X, + sample_weight, + centers_init, + max_iter=300, + verbose=False, + tol=1e-4, + n_threads=1, +): + """A single run of k-means elkan, assumes preparation completed prior. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The observations to cluster. If sparse matrix, must be in CSR format. + + sample_weight : array-like of shape (n_samples,) + The weights for each observation in X. + + centers_init : ndarray of shape (n_clusters, n_features) + The initial centers. + + max_iter : int, default=300 + Maximum number of iterations of the k-means algorithm to run. + + verbose : bool, default=False + Verbosity mode. + + tol : float, default=1e-4 + Relative tolerance with regards to Frobenius norm of the difference + in the cluster centers of two consecutive iterations to declare + convergence. + It's not advised to set `tol=0` since convergence might never be + declared due to rounding errors. Use a very small number instead. + + n_threads : int, default=1 + The number of OpenMP threads to use for the computation. Parallelism is + sample-wise on the main cython loop which assigns each sample to its + closest center. + + Returns + ------- + centroid : ndarray of shape (n_clusters, n_features) + Centroids found at the last iteration of k-means. + + label : ndarray of shape (n_samples,) + label[i] is the code or index of the centroid the + i'th observation is closest to. + + inertia : float + The final value of the inertia criterion (sum of squared distances to + the closest centroid for all observations in the training set). + + n_iter : int + Number of iterations run. + """ + n_samples = X.shape[0] + n_clusters = centers_init.shape[0] + + # Buffers to avoid new allocations at each iteration. 
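The `"elkan"` path documented above uses the triangle inequality to skip distance computations; a rough per-sample sketch of the core pruning test (simplified Python, not the chunked Cython kernels used by this module):

```python
# Simplified illustration of the Elkan pruning rule (assignment step only).
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances

def elkan_assign_step(X, centers, labels, upper_bounds):
    half = euclidean_distances(centers) / 2      # half pairwise center distances
    np.fill_diagonal(half, np.inf)
    nearest_other_half = half.min(axis=1)        # s(c): half distance to nearest other center
    for i in range(X.shape[0]):
        # If the upper bound on the distance to the assigned center is already
        # <= s(label), the assignment cannot change: skip all distance work.
        if upper_bounds[i] <= nearest_other_half[labels[i]]:
            continue
        d = np.linalg.norm(X[i] - centers, axis=1)
        labels[i] = np.argmin(d)
        upper_bounds[i] = d[labels[i]]
    return labels, upper_bounds
```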
+ centers = centers_init + centers_new = np.zeros_like(centers) + weight_in_clusters = np.zeros(n_clusters, dtype=X.dtype) + labels = np.full(n_samples, -1, dtype=np.int32) + labels_old = labels.copy() + center_half_distances = euclidean_distances(centers) / 2 + distance_next_center = np.partition( + np.asarray(center_half_distances), kth=1, axis=0 + )[1] + upper_bounds = np.zeros(n_samples, dtype=X.dtype) + lower_bounds = np.zeros((n_samples, n_clusters), dtype=X.dtype) + center_shift = np.zeros(n_clusters, dtype=X.dtype) + + if sp.issparse(X): + init_bounds = init_bounds_sparse + elkan_iter = elkan_iter_chunked_sparse + _inertia = _inertia_sparse + else: + init_bounds = init_bounds_dense + elkan_iter = elkan_iter_chunked_dense + _inertia = _inertia_dense + + init_bounds( + X, + centers, + center_half_distances, + labels, + upper_bounds, + lower_bounds, + n_threads=n_threads, + ) + + strict_convergence = False + + for i in range(max_iter): + elkan_iter( + X, + sample_weight, + centers, + centers_new, + weight_in_clusters, + center_half_distances, + distance_next_center, + upper_bounds, + lower_bounds, + labels, + center_shift, + n_threads, + ) + + # compute new pairwise distances between centers and closest other + # center of each center for next iterations + center_half_distances = euclidean_distances(centers_new) / 2 + distance_next_center = np.partition( + np.asarray(center_half_distances), kth=1, axis=0 + )[1] + + if verbose: + inertia = _inertia(X, sample_weight, centers, labels, n_threads) + print(f"Iteration {i}, inertia {inertia}") + + centers, centers_new = centers_new, centers + + if np.array_equal(labels, labels_old): + # First check the labels for strict convergence. + if verbose: + print(f"Converged at iteration {i}: strict convergence.") + strict_convergence = True + break + else: + # No strict convergence, check for tol based convergence. + center_shift_tot = (center_shift**2).sum() + if center_shift_tot <= tol: + if verbose: + print( + f"Converged at iteration {i}: center shift " + f"{center_shift_tot} within tolerance {tol}." + ) + break + + labels_old[:] = labels + + if not strict_convergence: + # rerun E-step so that predicted labels match cluster centers + elkan_iter( + X, + sample_weight, + centers, + centers, + weight_in_clusters, + center_half_distances, + distance_next_center, + upper_bounds, + lower_bounds, + labels, + center_shift, + n_threads, + update_centers=False, + ) + + inertia = _inertia(X, sample_weight, centers, labels, n_threads) + + return labels, inertia, centers, i + 1 + + +def _kmeans_single_lloyd( + X, + sample_weight, + centers_init, + max_iter=300, + verbose=False, + tol=1e-4, + n_threads=1, +): + """A single run of k-means lloyd, assumes preparation completed prior. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The observations to cluster. If sparse matrix, must be in CSR format. + + sample_weight : ndarray of shape (n_samples,) + The weights for each observation in X. + + centers_init : ndarray of shape (n_clusters, n_features) + The initial centers. + + max_iter : int, default=300 + Maximum number of iterations of the k-means algorithm to run. + + verbose : bool, default=False + Verbosity mode + + tol : float, default=1e-4 + Relative tolerance with regards to Frobenius norm of the difference + in the cluster centers of two consecutive iterations to declare + convergence. + It's not advised to set `tol=0` since convergence might never be + declared due to rounding errors. 
Use a very small number instead. + + n_threads : int, default=1 + The number of OpenMP threads to use for the computation. Parallelism is + sample-wise on the main cython loop which assigns each sample to its + closest center. + + Returns + ------- + centroid : ndarray of shape (n_clusters, n_features) + Centroids found at the last iteration of k-means. + + label : ndarray of shape (n_samples,) + label[i] is the code or index of the centroid the + i'th observation is closest to. + + inertia : float + The final value of the inertia criterion (sum of squared distances to + the closest centroid for all observations in the training set). + + n_iter : int + Number of iterations run. + """ + n_clusters = centers_init.shape[0] + + # Buffers to avoid new allocations at each iteration. + centers = centers_init + centers_new = np.zeros_like(centers) + labels = np.full(X.shape[0], -1, dtype=np.int32) + labels_old = labels.copy() + weight_in_clusters = np.zeros(n_clusters, dtype=X.dtype) + center_shift = np.zeros(n_clusters, dtype=X.dtype) + + if sp.issparse(X): + lloyd_iter = lloyd_iter_chunked_sparse + _inertia = _inertia_sparse + else: + lloyd_iter = lloyd_iter_chunked_dense + _inertia = _inertia_dense + + strict_convergence = False + + # Threadpoolctl context to limit the number of threads in second level of + # nested parallelism (i.e. BLAS) to avoid oversubscription. + with threadpool_limits(limits=1, user_api="blas"): + for i in range(max_iter): + lloyd_iter( + X, + sample_weight, + centers, + centers_new, + weight_in_clusters, + labels, + center_shift, + n_threads, + ) + + if verbose: + inertia = _inertia(X, sample_weight, centers, labels, n_threads) + print(f"Iteration {i}, inertia {inertia}.") + + centers, centers_new = centers_new, centers + + if np.array_equal(labels, labels_old): + # First check the labels for strict convergence. + if verbose: + print(f"Converged at iteration {i}: strict convergence.") + strict_convergence = True + break + else: + # No strict convergence, check for tol based convergence. + center_shift_tot = (center_shift**2).sum() + if center_shift_tot <= tol: + if verbose: + print( + f"Converged at iteration {i}: center shift " + f"{center_shift_tot} within tolerance {tol}." + ) + break + + labels_old[:] = labels + + if not strict_convergence: + # rerun E-step so that predicted labels match cluster centers + lloyd_iter( + X, + sample_weight, + centers, + centers, + weight_in_clusters, + labels, + center_shift, + n_threads, + update_centers=False, + ) + + inertia = _inertia(X, sample_weight, centers, labels, n_threads) + + return labels, inertia, centers, i + 1 + + +def _labels_inertia(X, sample_weight, centers, n_threads=1, return_inertia=True): + """E step of the K-means EM algorithm. + + Compute the labels and the inertia of the given samples and centers. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The input samples to assign to the labels. If sparse matrix, must + be in CSR format. + + sample_weight : ndarray of shape (n_samples,) + The weights for each observation in X. + + x_squared_norms : ndarray of shape (n_samples,) + Precomputed squared euclidean norm of each data point, to speed up + computations. + + centers : ndarray of shape (n_clusters, n_features) + The cluster centers. + + n_threads : int, default=1 + The number of OpenMP threads to use for the computation. Parallelism is + sample-wise on the main cython loop which assigns each sample to its + closest center. 
+ + return_inertia : bool, default=True + Whether to compute and return the inertia. + + Returns + ------- + labels : ndarray of shape (n_samples,) + The resulting assignment. + + inertia : float + Sum of squared distances of samples to their closest cluster center. + Inertia is only returned if return_inertia is True. + """ + n_samples = X.shape[0] + n_clusters = centers.shape[0] + + labels = np.full(n_samples, -1, dtype=np.int32) + center_shift = np.zeros(n_clusters, dtype=centers.dtype) + + if sp.issparse(X): + _labels = lloyd_iter_chunked_sparse + _inertia = _inertia_sparse + else: + _labels = lloyd_iter_chunked_dense + _inertia = _inertia_dense + + _labels( + X, + sample_weight, + centers, + centers_new=None, + weight_in_clusters=None, + labels=labels, + center_shift=center_shift, + n_threads=n_threads, + update_centers=False, + ) + + if return_inertia: + inertia = _inertia(X, sample_weight, centers, labels, n_threads) + return labels, inertia + + return labels + + +def _labels_inertia_threadpool_limit( + X, sample_weight, centers, n_threads=1, return_inertia=True +): + """Same as _labels_inertia but in a threadpool_limits context.""" + with threadpool_limits(limits=1, user_api="blas"): + result = _labels_inertia(X, sample_weight, centers, n_threads, return_inertia) + + return result + + +class _BaseKMeans( + ClassNamePrefixFeaturesOutMixin, TransformerMixin, ClusterMixin, BaseEstimator, ABC +): + """Base class for KMeans and MiniBatchKMeans""" + + _parameter_constraints: dict = { + "n_clusters": [Interval(Integral, 1, None, closed="left")], + "init": [StrOptions({"k-means++", "random"}), callable, "array-like"], + "n_init": [ + StrOptions({"auto"}), + Interval(Integral, 1, None, closed="left"), + ], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left")], + "verbose": ["verbose"], + "random_state": ["random_state"], + } + + def __init__( + self, + n_clusters, + *, + init, + n_init, + max_iter, + tol, + verbose, + random_state, + ): + self.n_clusters = n_clusters + self.init = init + self.max_iter = max_iter + self.tol = tol + self.n_init = n_init + self.verbose = verbose + self.random_state = random_state + + def _check_params_vs_input(self, X, default_n_init=None): + # n_clusters + if X.shape[0] < self.n_clusters: + raise ValueError( + f"n_samples={X.shape[0]} should be >= n_clusters={self.n_clusters}." + ) + + # tol + self._tol = _tolerance(X, self.tol) + + # n-init + if self.n_init == "auto": + if isinstance(self.init, str) and self.init == "k-means++": + self._n_init = 1 + elif isinstance(self.init, str) and self.init == "random": + self._n_init = default_n_init + elif callable(self.init): + self._n_init = default_n_init + else: # array-like + self._n_init = 1 + else: + self._n_init = self.n_init + + if _is_arraylike_not_scalar(self.init) and self._n_init != 1: + warnings.warn( + ( + "Explicit initial center position passed: performing only" + f" one init in {self.__class__.__name__} instead of " + f"n_init={self._n_init}." + ), + RuntimeWarning, + stacklevel=2, + ) + self._n_init = 1 + + @abstractmethod + def _warn_mkl_vcomp(self, n_active_threads): + """Issue an estimator specific warning when vcomp and mkl are both present + + This method is called by `_check_mkl_vcomp`. 
+ """ + + def _check_mkl_vcomp(self, X, n_samples): + """Check when vcomp and mkl are both present""" + # The BLAS call inside a prange in lloyd_iter_chunked_dense is known to + # cause a small memory leak when there are less chunks than the number + # of available threads. It only happens when the OpenMP library is + # vcomp (microsoft OpenMP) and the BLAS library is MKL. see #18653 + if sp.issparse(X): + return + + n_active_threads = int(np.ceil(n_samples / CHUNK_SIZE)) + if n_active_threads < self._n_threads: + modules = threadpool_info() + has_vcomp = "vcomp" in [module["prefix"] for module in modules] + has_mkl = ("mkl", "intel") in [ + (module["internal_api"], module.get("threading_layer", None)) + for module in modules + ] + if has_vcomp and has_mkl: + self._warn_mkl_vcomp(n_active_threads) + + def _validate_center_shape(self, X, centers): + """Check if centers is compatible with X and n_clusters.""" + if centers.shape[0] != self.n_clusters: + raise ValueError( + f"The shape of the initial centers {centers.shape} does not " + f"match the number of clusters {self.n_clusters}." + ) + if centers.shape[1] != X.shape[1]: + raise ValueError( + f"The shape of the initial centers {centers.shape} does not " + f"match the number of features of the data {X.shape[1]}." + ) + + def _check_test_data(self, X): + X = self._validate_data( + X, + accept_sparse="csr", + reset=False, + dtype=[np.float64, np.float32], + order="C", + accept_large_sparse=False, + ) + return X + + def _init_centroids( + self, + X, + x_squared_norms, + init, + random_state, + sample_weight, + init_size=None, + n_centroids=None, + ): + """Compute the initial centroids. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The input samples. + + x_squared_norms : ndarray of shape (n_samples,) + Squared euclidean norm of each data point. Pass it if you have it + at hands already to avoid it being recomputed here. + + init : {'k-means++', 'random'}, callable or ndarray of shape \ + (n_clusters, n_features) + Method for initialization. + + random_state : RandomState instance + Determines random number generation for centroid initialization. + See :term:`Glossary `. + + sample_weight : ndarray of shape (n_samples,) + The weights for each observation in X. `sample_weight` is not used + during initialization if `init` is a callable or a user provided + array. + + init_size : int, default=None + Number of samples to randomly sample for speeding up the + initialization (sometimes at the expense of accuracy). + + n_centroids : int, default=None + Number of centroids to initialize. + If left to 'None' the number of centroids will be equal to + number of clusters to form (self.n_clusters). + + Returns + ------- + centers : ndarray of shape (n_clusters, n_features) + Initial centroids of clusters. 
+ """ + n_samples = X.shape[0] + n_clusters = self.n_clusters if n_centroids is None else n_centroids + + if init_size is not None and init_size < n_samples: + init_indices = random_state.randint(0, n_samples, init_size) + X = X[init_indices] + x_squared_norms = x_squared_norms[init_indices] + n_samples = X.shape[0] + sample_weight = sample_weight[init_indices] + + if isinstance(init, str) and init == "k-means++": + centers, _ = _kmeans_plusplus( + X, + n_clusters, + random_state=random_state, + x_squared_norms=x_squared_norms, + sample_weight=sample_weight, + ) + elif isinstance(init, str) and init == "random": + seeds = random_state.choice( + n_samples, + size=n_clusters, + replace=False, + p=sample_weight / sample_weight.sum(), + ) + centers = X[seeds] + elif _is_arraylike_not_scalar(self.init): + centers = init + elif callable(init): + centers = init(X, n_clusters, random_state=random_state) + centers = check_array(centers, dtype=X.dtype, copy=False, order="C") + self._validate_center_shape(X, centers) + + if sp.issparse(centers): + centers = centers.toarray() + + return centers + + def fit_predict(self, X, y=None, sample_weight=None): + """Compute cluster centers and predict cluster index for each sample. + + Convenience method; equivalent to calling fit(X) followed by + predict(X). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data to transform. + + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in X. If None, all observations + are assigned equal weight. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Index of the cluster each sample belongs to. + """ + return self.fit(X, sample_weight=sample_weight).labels_ + + def predict(self, X, sample_weight="deprecated"): + """Predict the closest cluster each sample in X belongs to. + + In the vector quantization literature, `cluster_centers_` is called + the code book and each value returned by `predict` is the index of + the closest code in the code book. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data to predict. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in X. If None, all observations + are assigned equal weight. + + .. deprecated:: 1.3 + The parameter `sample_weight` is deprecated in version 1.3 + and will be removed in 1.5. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Index of the cluster each sample belongs to. + """ + check_is_fitted(self) + + X = self._check_test_data(X) + if not (isinstance(sample_weight, str) and sample_weight == "deprecated"): + warnings.warn( + ( + "'sample_weight' was deprecated in version 1.3 and " + "will be removed in 1.5." + ), + FutureWarning, + ) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + else: + sample_weight = _check_sample_weight(None, X, dtype=X.dtype) + + labels = _labels_inertia_threadpool_limit( + X, + sample_weight, + self.cluster_centers_, + n_threads=self._n_threads, + return_inertia=False, + ) + + return labels + + def fit_transform(self, X, y=None, sample_weight=None): + """Compute clustering and transform X to cluster-distance space. + + Equivalent to fit(X).transform(X), but more efficiently implemented. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data to transform. 
+ + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in X. If None, all observations + are assigned equal weight. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_clusters) + X transformed in the new space. + """ + return self.fit(X, sample_weight=sample_weight)._transform(X) + + def transform(self, X): + """Transform X to a cluster-distance space. + + In the new space, each dimension is the distance to the cluster + centers. Note that even if X is sparse, the array returned by + `transform` will typically be dense. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data to transform. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_clusters) + X transformed in the new space. + """ + check_is_fitted(self) + + X = self._check_test_data(X) + return self._transform(X) + + def _transform(self, X): + """Guts of transform method; no input validation.""" + return euclidean_distances(X, self.cluster_centers_) + + def score(self, X, y=None, sample_weight=None): + """Opposite of the value of X on the K-means objective. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data. + + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in X. If None, all observations + are assigned equal weight. + + Returns + ------- + score : float + Opposite of the value of X on the K-means objective. + """ + check_is_fitted(self) + + X = self._check_test_data(X) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + _, scores = _labels_inertia_threadpool_limit( + X, sample_weight, self.cluster_centers_, self._n_threads + ) + return -scores + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + }, + } + + +class KMeans(_BaseKMeans): + """K-Means clustering. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + + n_clusters : int, default=8 + The number of clusters to form as well as the number of + centroids to generate. + + For an example of how to choose an optimal value for `n_clusters` refer to + :ref:`sphx_glr_auto_examples_cluster_plot_kmeans_silhouette_analysis.py`. + + init : {'k-means++', 'random'}, callable or array-like of shape \ + (n_clusters, n_features), default='k-means++' + Method for initialization: + + * 'k-means++' : selects initial cluster centroids using sampling \ + based on an empirical probability distribution of the points' \ + contribution to the overall inertia. This technique speeds up \ + convergence. The algorithm implemented is "greedy k-means++". It \ + differs from the vanilla k-means++ by making several trials at \ + each sampling step and choosing the best centroid among them. + + * 'random': choose `n_clusters` observations (rows) at random from \ + data for the initial centroids. + + * If an array is passed, it should be of shape (n_clusters, n_features)\ + and gives the initial centers. + + * If a callable is passed, it should take arguments X, n_clusters and a\ + random state and return an initialization. + + For an example of how to use the different `init` strategy, see the example + entitled :ref:`sphx_glr_auto_examples_cluster_plot_kmeans_digits.py`. 
+ + n_init : 'auto' or int, default='auto' + Number of times the k-means algorithm is run with different centroid + seeds. The final results is the best output of `n_init` consecutive runs + in terms of inertia. Several runs are recommended for sparse + high-dimensional problems (see :ref:`kmeans_sparse_high_dim`). + + When `n_init='auto'`, the number of runs depends on the value of init: + 10 if using `init='random'` or `init` is a callable; + 1 if using `init='k-means++'` or `init` is an array-like. + + .. versionadded:: 1.2 + Added 'auto' option for `n_init`. + + .. versionchanged:: 1.4 + Default value for `n_init` changed to `'auto'`. + + max_iter : int, default=300 + Maximum number of iterations of the k-means algorithm for a + single run. + + tol : float, default=1e-4 + Relative tolerance with regards to Frobenius norm of the difference + in the cluster centers of two consecutive iterations to declare + convergence. + + verbose : int, default=0 + Verbosity mode. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for centroid initialization. Use + an int to make the randomness deterministic. + See :term:`Glossary `. + + copy_x : bool, default=True + When pre-computing distances it is more numerically accurate to center + the data first. If copy_x is True (default), then the original data is + not modified. If False, the original data is modified, and put back + before the function returns, but small numerical differences may be + introduced by subtracting and then adding the data mean. Note that if + the original data is not C-contiguous, a copy will be made even if + copy_x is False. If the original data is sparse, but not in CSR format, + a copy will be made even if copy_x is False. + + algorithm : {"lloyd", "elkan"}, default="lloyd" + K-means algorithm to use. The classical EM-style algorithm is `"lloyd"`. + The `"elkan"` variation can be more efficient on some datasets with + well-defined clusters, by using the triangle inequality. However it's + more memory intensive due to the allocation of an extra array of shape + `(n_samples, n_clusters)`. + + .. versionchanged:: 0.18 + Added Elkan algorithm + + .. versionchanged:: 1.1 + Renamed "full" to "lloyd", and deprecated "auto" and "full". + Changed "auto" to use "lloyd" instead of "elkan". + + Attributes + ---------- + cluster_centers_ : ndarray of shape (n_clusters, n_features) + Coordinates of cluster centers. If the algorithm stops before fully + converging (see ``tol`` and ``max_iter``), these will not be + consistent with ``labels_``. + + labels_ : ndarray of shape (n_samples,) + Labels of each point + + inertia_ : float + Sum of squared distances of samples to their closest cluster center, + weighted by the sample weights if provided. + + n_iter_ : int + Number of iterations run. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + MiniBatchKMeans : Alternative online implementation that does incremental + updates of the centers positions using mini-batches. + For large scale learning (say n_samples > 10k) MiniBatchKMeans is + probably much faster than the default batch implementation. + + Notes + ----- + The k-means problem is solved using either Lloyd's or Elkan's algorithm. 
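[Editor's note] As a usage aside on the `algorithm` parameter described above, the two solvers are interchangeable from the caller's point of view. A small sketch, not part of this diff, assuming scikit-learn is installed:

import numpy as np
from sklearn.cluster import KMeans

X = np.random.RandomState(0).rand(200, 3)

km_lloyd = KMeans(n_clusters=4, algorithm="lloyd", n_init="auto", random_state=0).fit(X)
km_elkan = KMeans(n_clusters=4, algorithm="elkan", n_init="auto", random_state=0).fit(X)

# Both minimize the same inertia objective; "elkan" exploits the triangle
# inequality and can be faster on well-separated clusters, at the cost of an
# extra array of shape (n_samples, n_clusters).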
+ + The average complexity is given by O(k n T), where n is the number of + samples and T is the number of iteration. + + The worst case complexity is given by O(n^(k+2/p)) with + n = n_samples, p = n_features. + Refer to :doi:`"How slow is the k-means method?" D. Arthur and S. Vassilvitskii - + SoCG2006.<10.1145/1137856.1137880>` for more details. + + In practice, the k-means algorithm is very fast (one of the fastest + clustering algorithms available), but it falls in local minima. That's why + it can be useful to restart it several times. + + If the algorithm stops before fully converging (because of ``tol`` or + ``max_iter``), ``labels_`` and ``cluster_centers_`` will not be consistent, + i.e. the ``cluster_centers_`` will not be the means of the points in each + cluster. Also, the estimator will reassign ``labels_`` after the last + iteration to make ``labels_`` consistent with ``predict`` on the training + set. + + Examples + -------- + + >>> from sklearn.cluster import KMeans + >>> import numpy as np + >>> X = np.array([[1, 2], [1, 4], [1, 0], + ... [10, 2], [10, 4], [10, 0]]) + >>> kmeans = KMeans(n_clusters=2, random_state=0, n_init="auto").fit(X) + >>> kmeans.labels_ + array([1, 1, 1, 0, 0, 0], dtype=int32) + >>> kmeans.predict([[0, 0], [12, 3]]) + array([1, 0], dtype=int32) + >>> kmeans.cluster_centers_ + array([[10., 2.], + [ 1., 2.]]) + + For a more detailed example of K-Means using the iris dataset see + :ref:`sphx_glr_auto_examples_cluster_plot_cluster_iris.py`. + + For examples of common problems with K-Means and how to address them see + :ref:`sphx_glr_auto_examples_cluster_plot_kmeans_assumptions.py`. + + For an example of how to use K-Means to perform color quantization see + :ref:`sphx_glr_auto_examples_cluster_plot_color_quantization.py`. + + For a demonstration of how K-Means can be used to cluster text documents see + :ref:`sphx_glr_auto_examples_text_plot_document_clustering.py`. + + For a comparison between K-Means and MiniBatchKMeans refer to example + :ref:`sphx_glr_auto_examples_cluster_plot_mini_batch_kmeans.py`. + """ + + _parameter_constraints: dict = { + **_BaseKMeans._parameter_constraints, + "copy_x": ["boolean"], + "algorithm": [StrOptions({"lloyd", "elkan"})], + } + + def __init__( + self, + n_clusters=8, + *, + init="k-means++", + n_init="auto", + max_iter=300, + tol=1e-4, + verbose=0, + random_state=None, + copy_x=True, + algorithm="lloyd", + ): + super().__init__( + n_clusters=n_clusters, + init=init, + n_init=n_init, + max_iter=max_iter, + tol=tol, + verbose=verbose, + random_state=random_state, + ) + + self.copy_x = copy_x + self.algorithm = algorithm + + def _check_params_vs_input(self, X): + super()._check_params_vs_input(X, default_n_init=10) + + self._algorithm = self.algorithm + if self._algorithm == "elkan" and self.n_clusters == 1: + warnings.warn( + ( + "algorithm='elkan' doesn't make sense for a single " + "cluster. Using 'lloyd' instead." + ), + RuntimeWarning, + ) + self._algorithm = "lloyd" + + def _warn_mkl_vcomp(self, n_active_threads): + """Warn when vcomp and mkl are both present""" + warnings.warn( + "KMeans is known to have a memory leak on Windows " + "with MKL, when there are less chunks than available " + "threads. You can avoid it by setting the environment" + f" variable OMP_NUM_THREADS={n_active_threads}." + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None, sample_weight=None): + """Compute k-means clustering. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training instances to cluster. It must be noted that the data + will be converted to C ordering, which will cause a memory + copy if the given data is not C-contiguous. + If a sparse matrix is passed, a copy will be made if it's not in + CSR format. + + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in X. If None, all observations + are assigned equal weight. `sample_weight` is not used during + initialization if `init` is a callable or a user provided array. + + .. versionadded:: 0.20 + + Returns + ------- + self : object + Fitted estimator. + """ + X = self._validate_data( + X, + accept_sparse="csr", + dtype=[np.float64, np.float32], + order="C", + copy=self.copy_x, + accept_large_sparse=False, + ) + + self._check_params_vs_input(X) + + random_state = check_random_state(self.random_state) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + self._n_threads = _openmp_effective_n_threads() + + # Validate init array + init = self.init + init_is_array_like = _is_arraylike_not_scalar(init) + if init_is_array_like: + init = check_array(init, dtype=X.dtype, copy=True, order="C") + self._validate_center_shape(X, init) + + # subtract of mean of x for more accurate distance computations + if not sp.issparse(X): + X_mean = X.mean(axis=0) + # The copy was already done above + X -= X_mean + + if init_is_array_like: + init -= X_mean + + # precompute squared norms of data points + x_squared_norms = row_norms(X, squared=True) + + if self._algorithm == "elkan": + kmeans_single = _kmeans_single_elkan + else: + kmeans_single = _kmeans_single_lloyd + self._check_mkl_vcomp(X, X.shape[0]) + + best_inertia, best_labels = None, None + + for i in range(self._n_init): + # Initialize centers + centers_init = self._init_centroids( + X, + x_squared_norms=x_squared_norms, + init=init, + random_state=random_state, + sample_weight=sample_weight, + ) + if self.verbose: + print("Initialization complete") + + # run a k-means once + labels, inertia, centers, n_iter_ = kmeans_single( + X, + sample_weight, + centers_init, + max_iter=self.max_iter, + verbose=self.verbose, + tol=self._tol, + n_threads=self._n_threads, + ) + + # determine if these results are the best so far + # we chose a new run if it has a better inertia and the clustering is + # different from the best so far (it's possible that the inertia is + # slightly better even if the clustering is the same with potentially + # permuted labels, due to rounding errors) + if best_inertia is None or ( + inertia < best_inertia + and not _is_same_clustering(labels, best_labels, self.n_clusters) + ): + best_labels = labels + best_centers = centers + best_inertia = inertia + best_n_iter = n_iter_ + + if not sp.issparse(X): + if not self.copy_x: + X += X_mean + best_centers += X_mean + + distinct_clusters = len(set(best_labels)) + if distinct_clusters < self.n_clusters: + warnings.warn( + "Number of distinct clusters ({}) found smaller than " + "n_clusters ({}). 
Possibly due to duplicate points " + "in X.".format(distinct_clusters, self.n_clusters), + ConvergenceWarning, + stacklevel=2, + ) + + self.cluster_centers_ = best_centers + self._n_features_out = self.cluster_centers_.shape[0] + self.labels_ = best_labels + self.inertia_ = best_inertia + self.n_iter_ = best_n_iter + return self + + +def _mini_batch_step( + X, + sample_weight, + centers, + centers_new, + weight_sums, + random_state, + random_reassign=False, + reassignment_ratio=0.01, + verbose=False, + n_threads=1, +): + """Incremental update of the centers for the Minibatch K-Means algorithm. + + Parameters + ---------- + + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The original data array. If sparse, must be in CSR format. + + x_squared_norms : ndarray of shape (n_samples,) + Squared euclidean norm of each data point. + + sample_weight : ndarray of shape (n_samples,) + The weights for each observation in `X`. + + centers : ndarray of shape (n_clusters, n_features) + The cluster centers before the current iteration + + centers_new : ndarray of shape (n_clusters, n_features) + The cluster centers after the current iteration. Modified in-place. + + weight_sums : ndarray of shape (n_clusters,) + The vector in which we keep track of the numbers of points in a + cluster. This array is modified in place. + + random_state : RandomState instance + Determines random number generation for low count centers reassignment. + See :term:`Glossary `. + + random_reassign : boolean, default=False + If True, centers with very low counts are randomly reassigned + to observations. + + reassignment_ratio : float, default=0.01 + Control the fraction of the maximum number of counts for a + center to be reassigned. A higher value means that low count + centers are more likely to be reassigned, which means that the + model will take longer to converge, but should converge in a + better clustering. + + verbose : bool, default=False + Controls the verbosity. + + n_threads : int, default=1 + The number of OpenMP threads to use for the computation. + + Returns + ------- + inertia : float + Sum of squared distances of samples to their closest cluster center. + The inertia is computed after finding the labels and before updating + the centers. 
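[Editor's note] To make the `reassignment_ratio` rule documented above concrete, here is a small numeric sketch, illustrative only and not part of this diff, of how low-weight centers are flagged for reassignment:

import numpy as np

weight_sums = np.array([120.0, 3.0, 95.0, 0.0])
reassignment_ratio = 0.01

# Centers whose accumulated weight falls below a fraction of the heaviest
# center become reassignment candidates.
to_reassign = weight_sums < reassignment_ratio * weight_sums.max()
# -> array([False, False, False,  True]); only the empty center is moved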
+ """ + # Perform label assignment to nearest centers + # For better efficiency, it's better to run _mini_batch_step in a + # threadpool_limit context than using _labels_inertia_threadpool_limit here + labels, inertia = _labels_inertia(X, sample_weight, centers, n_threads=n_threads) + + # Update centers according to the labels + if sp.issparse(X): + _minibatch_update_sparse( + X, sample_weight, centers, centers_new, weight_sums, labels, n_threads + ) + else: + _minibatch_update_dense( + X, + sample_weight, + centers, + centers_new, + weight_sums, + labels, + n_threads, + ) + + # Reassign clusters that have very low weight + if random_reassign and reassignment_ratio > 0: + to_reassign = weight_sums < reassignment_ratio * weight_sums.max() + + # pick at most .5 * batch_size samples as new centers + if to_reassign.sum() > 0.5 * X.shape[0]: + indices_dont_reassign = np.argsort(weight_sums)[int(0.5 * X.shape[0]) :] + to_reassign[indices_dont_reassign] = False + n_reassigns = to_reassign.sum() + + if n_reassigns: + # Pick new clusters amongst observations with uniform probability + new_centers = random_state.choice( + X.shape[0], replace=False, size=n_reassigns + ) + if verbose: + print(f"[MiniBatchKMeans] Reassigning {n_reassigns} cluster centers.") + + if sp.issparse(X): + assign_rows_csr( + X, + new_centers.astype(np.intp, copy=False), + np.where(to_reassign)[0].astype(np.intp, copy=False), + centers_new, + ) + else: + centers_new[to_reassign] = X[new_centers] + + # reset counts of reassigned centers, but don't reset them too small + # to avoid instant reassignment. This is a pretty dirty hack as it + # also modifies the learning rates. + weight_sums[to_reassign] = np.min(weight_sums[~to_reassign]) + + return inertia + + +class MiniBatchKMeans(_BaseKMeans): + """ + Mini-Batch K-Means clustering. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + + n_clusters : int, default=8 + The number of clusters to form as well as the number of + centroids to generate. + + init : {'k-means++', 'random'}, callable or array-like of shape \ + (n_clusters, n_features), default='k-means++' + Method for initialization: + + 'k-means++' : selects initial cluster centroids using sampling based on + an empirical probability distribution of the points' contribution to the + overall inertia. This technique speeds up convergence. The algorithm + implemented is "greedy k-means++". It differs from the vanilla k-means++ + by making several trials at each sampling step and choosing the best centroid + among them. + + 'random': choose `n_clusters` observations (rows) at random from data + for the initial centroids. + + If an array is passed, it should be of shape (n_clusters, n_features) + and gives the initial centers. + + If a callable is passed, it should take arguments X, n_clusters and a + random state and return an initialization. + + max_iter : int, default=100 + Maximum number of iterations over the complete dataset before + stopping independently of any early stopping criterion heuristics. + + batch_size : int, default=1024 + Size of the mini batches. + For faster computations, you can set the ``batch_size`` greater than + 256 * number of cores to enable parallelism on all cores. + + .. versionchanged:: 1.0 + `batch_size` default changed from 100 to 1024. + + verbose : int, default=0 + Verbosity mode. + + compute_labels : bool, default=True + Compute label assignment and inertia for the complete dataset + once the minibatch optimization has converged in fit. 
+ + random_state : int, RandomState instance or None, default=None + Determines random number generation for centroid initialization and + random reassignment. Use an int to make the randomness deterministic. + See :term:`Glossary `. + + tol : float, default=0.0 + Control early stopping based on the relative center changes as + measured by a smoothed, variance-normalized of the mean center + squared position changes. This early stopping heuristics is + closer to the one used for the batch variant of the algorithms + but induces a slight computational and memory overhead over the + inertia heuristic. + + To disable convergence detection based on normalized center + change, set tol to 0.0 (default). + + max_no_improvement : int, default=10 + Control early stopping based on the consecutive number of mini + batches that does not yield an improvement on the smoothed inertia. + + To disable convergence detection based on inertia, set + max_no_improvement to None. + + init_size : int, default=None + Number of samples to randomly sample for speeding up the + initialization (sometimes at the expense of accuracy): the + only algorithm is initialized by running a batch KMeans on a + random subset of the data. This needs to be larger than n_clusters. + + If `None`, the heuristic is `init_size = 3 * batch_size` if + `3 * batch_size < n_clusters`, else `init_size = 3 * n_clusters`. + + n_init : 'auto' or int, default="auto" + Number of random initializations that are tried. + In contrast to KMeans, the algorithm is only run once, using the best of + the `n_init` initializations as measured by inertia. Several runs are + recommended for sparse high-dimensional problems (see + :ref:`kmeans_sparse_high_dim`). + + When `n_init='auto'`, the number of runs depends on the value of init: + 3 if using `init='random'` or `init` is a callable; + 1 if using `init='k-means++'` or `init` is an array-like. + + .. versionadded:: 1.2 + Added 'auto' option for `n_init`. + + .. versionchanged:: 1.4 + Default value for `n_init` changed to `'auto'` in version. + + reassignment_ratio : float, default=0.01 + Control the fraction of the maximum number of counts for a center to + be reassigned. A higher value means that low count centers are more + easily reassigned, which means that the model will take longer to + converge, but should converge in a better clustering. However, too high + a value may cause convergence issues, especially with a small batch + size. + + Attributes + ---------- + + cluster_centers_ : ndarray of shape (n_clusters, n_features) + Coordinates of cluster centers. + + labels_ : ndarray of shape (n_samples,) + Labels of each point (if compute_labels is set to True). + + inertia_ : float + The value of the inertia criterion associated with the chosen + partition if compute_labels is set to True. If compute_labels is set to + False, it's an approximation of the inertia based on an exponentially + weighted average of the batch inertiae. + The inertia is defined as the sum of square distances of samples to + their cluster center, weighted by the sample weights if provided. + + n_iter_ : int + Number of iterations over the full dataset. + + n_steps_ : int + Number of minibatches processed. + + .. versionadded:: 1.0 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. 
versionadded:: 1.0 + + See Also + -------- + KMeans : The classic implementation of the clustering method based on the + Lloyd's algorithm. It consumes the whole set of input data at each + iteration. + + Notes + ----- + See https://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf + + When there are too few points in the dataset, some centers may be + duplicated, which means that a proper clustering in terms of the number + of requesting clusters and the number of returned clusters will not + always match. One solution is to set `reassignment_ratio=0`, which + prevents reassignments of clusters that are too small. + + Examples + -------- + >>> from sklearn.cluster import MiniBatchKMeans + >>> import numpy as np + >>> X = np.array([[1, 2], [1, 4], [1, 0], + ... [4, 2], [4, 0], [4, 4], + ... [4, 5], [0, 1], [2, 2], + ... [3, 2], [5, 5], [1, -1]]) + >>> # manually fit on batches + >>> kmeans = MiniBatchKMeans(n_clusters=2, + ... random_state=0, + ... batch_size=6, + ... n_init="auto") + >>> kmeans = kmeans.partial_fit(X[0:6,:]) + >>> kmeans = kmeans.partial_fit(X[6:12,:]) + >>> kmeans.cluster_centers_ + array([[3.375, 3. ], + [0.75 , 0.5 ]]) + >>> kmeans.predict([[0, 0], [4, 4]]) + array([1, 0], dtype=int32) + >>> # fit on the whole data + >>> kmeans = MiniBatchKMeans(n_clusters=2, + ... random_state=0, + ... batch_size=6, + ... max_iter=10, + ... n_init="auto").fit(X) + >>> kmeans.cluster_centers_ + array([[3.55102041, 2.48979592], + [1.06896552, 1. ]]) + >>> kmeans.predict([[0, 0], [4, 4]]) + array([1, 0], dtype=int32) + """ + + _parameter_constraints: dict = { + **_BaseKMeans._parameter_constraints, + "batch_size": [Interval(Integral, 1, None, closed="left")], + "compute_labels": ["boolean"], + "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None], + "init_size": [Interval(Integral, 1, None, closed="left"), None], + "reassignment_ratio": [Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, + n_clusters=8, + *, + init="k-means++", + max_iter=100, + batch_size=1024, + verbose=0, + compute_labels=True, + random_state=None, + tol=0.0, + max_no_improvement=10, + init_size=None, + n_init="auto", + reassignment_ratio=0.01, + ): + super().__init__( + n_clusters=n_clusters, + init=init, + max_iter=max_iter, + verbose=verbose, + random_state=random_state, + tol=tol, + n_init=n_init, + ) + + self.max_no_improvement = max_no_improvement + self.batch_size = batch_size + self.compute_labels = compute_labels + self.init_size = init_size + self.reassignment_ratio = reassignment_ratio + + def _check_params_vs_input(self, X): + super()._check_params_vs_input(X, default_n_init=3) + + self._batch_size = min(self.batch_size, X.shape[0]) + + # init_size + self._init_size = self.init_size + if self._init_size is None: + self._init_size = 3 * self._batch_size + if self._init_size < self.n_clusters: + self._init_size = 3 * self.n_clusters + elif self._init_size < self.n_clusters: + warnings.warn( + ( + f"init_size={self._init_size} should be larger than " + f"n_clusters={self.n_clusters}. Setting it to " + "min(3*n_clusters, n_samples)" + ), + RuntimeWarning, + stacklevel=2, + ) + self._init_size = 3 * self.n_clusters + self._init_size = min(self._init_size, X.shape[0]) + + # reassignment_ratio + if self.reassignment_ratio < 0: + raise ValueError( + "reassignment_ratio should be >= 0, got " + f"{self.reassignment_ratio} instead." 
+ ) + + def _warn_mkl_vcomp(self, n_active_threads): + """Warn when vcomp and mkl are both present""" + warnings.warn( + "MiniBatchKMeans is known to have a memory leak on " + "Windows with MKL, when there are less chunks than " + "available threads. You can prevent it by setting " + f"batch_size >= {self._n_threads * CHUNK_SIZE} or by " + "setting the environment variable " + f"OMP_NUM_THREADS={n_active_threads}" + ) + + def _mini_batch_convergence( + self, step, n_steps, n_samples, centers_squared_diff, batch_inertia + ): + """Helper function to encapsulate the early stopping logic""" + # Normalize inertia to be able to compare values when + # batch_size changes + batch_inertia /= self._batch_size + + # count steps starting from 1 for user friendly verbose mode. + step = step + 1 + + # Ignore first iteration because it's inertia from initialization. + if step == 1: + if self.verbose: + print( + f"Minibatch step {step}/{n_steps}: mean batch " + f"inertia: {batch_inertia}" + ) + return False + + # Compute an Exponentially Weighted Average of the inertia to + # monitor the convergence while discarding minibatch-local stochastic + # variability: https://en.wikipedia.org/wiki/Moving_average + if self._ewa_inertia is None: + self._ewa_inertia = batch_inertia + else: + alpha = self._batch_size * 2.0 / (n_samples + 1) + alpha = min(alpha, 1) + self._ewa_inertia = self._ewa_inertia * (1 - alpha) + batch_inertia * alpha + + # Log progress to be able to monitor convergence + if self.verbose: + print( + f"Minibatch step {step}/{n_steps}: mean batch inertia: " + f"{batch_inertia}, ewa inertia: {self._ewa_inertia}" + ) + + # Early stopping based on absolute tolerance on squared change of + # centers position + if self._tol > 0.0 and centers_squared_diff <= self._tol: + if self.verbose: + print(f"Converged (small centers change) at step {step}/{n_steps}") + return True + + # Early stopping heuristic due to lack of improvement on smoothed + # inertia + if self._ewa_inertia_min is None or self._ewa_inertia < self._ewa_inertia_min: + self._no_improvement = 0 + self._ewa_inertia_min = self._ewa_inertia + else: + self._no_improvement += 1 + + if ( + self.max_no_improvement is not None + and self._no_improvement >= self.max_no_improvement + ): + if self.verbose: + print( + "Converged (lack of improvement in inertia) at step " + f"{step}/{n_steps}" + ) + return True + + return False + + def _random_reassign(self): + """Check if a random reassignment needs to be done. + + Do random reassignments each time 10 * n_clusters samples have been + processed. + + If there are empty clusters we always want to reassign. + """ + self._n_since_last_reassign += self._batch_size + if (self._counts == 0).any() or self._n_since_last_reassign >= ( + 10 * self.n_clusters + ): + self._n_since_last_reassign = 0 + return True + return False + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None, sample_weight=None): + """Compute the centroids on X by chunking it into mini-batches. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training instances to cluster. It must be noted that the data + will be converted to C ordering, which will cause a memory copy + if the given data is not C-contiguous. + If a sparse matrix is passed, a copy will be made if it's not in + CSR format. + + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in X. 
If None, all observations + are assigned equal weight. `sample_weight` is not used during + initialization if `init` is a callable or a user provided array. + + .. versionadded:: 0.20 + + Returns + ------- + self : object + Fitted estimator. + """ + X = self._validate_data( + X, + accept_sparse="csr", + dtype=[np.float64, np.float32], + order="C", + accept_large_sparse=False, + ) + + self._check_params_vs_input(X) + random_state = check_random_state(self.random_state) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + self._n_threads = _openmp_effective_n_threads() + n_samples, n_features = X.shape + + # Validate init array + init = self.init + if _is_arraylike_not_scalar(init): + init = check_array(init, dtype=X.dtype, copy=True, order="C") + self._validate_center_shape(X, init) + + self._check_mkl_vcomp(X, self._batch_size) + + # precompute squared norms of data points + x_squared_norms = row_norms(X, squared=True) + + # Validation set for the init + validation_indices = random_state.randint(0, n_samples, self._init_size) + X_valid = X[validation_indices] + sample_weight_valid = sample_weight[validation_indices] + + # perform several inits with random subsets + best_inertia = None + for init_idx in range(self._n_init): + if self.verbose: + print(f"Init {init_idx + 1}/{self._n_init} with method {init}") + + # Initialize the centers using only a fraction of the data as we + # expect n_samples to be very large when using MiniBatchKMeans. + cluster_centers = self._init_centroids( + X, + x_squared_norms=x_squared_norms, + init=init, + random_state=random_state, + init_size=self._init_size, + sample_weight=sample_weight, + ) + + # Compute inertia on a validation set. + _, inertia = _labels_inertia_threadpool_limit( + X_valid, + sample_weight_valid, + cluster_centers, + n_threads=self._n_threads, + ) + + if self.verbose: + print(f"Inertia for init {init_idx + 1}/{self._n_init}: {inertia}") + if best_inertia is None or inertia < best_inertia: + init_centers = cluster_centers + best_inertia = inertia + + centers = init_centers + centers_new = np.empty_like(centers) + + # Initialize counts + self._counts = np.zeros(self.n_clusters, dtype=X.dtype) + + # Attributes to monitor the convergence + self._ewa_inertia = None + self._ewa_inertia_min = None + self._no_improvement = 0 + + # Initialize number of samples seen since last reassignment + self._n_since_last_reassign = 0 + + n_steps = (self.max_iter * n_samples) // self._batch_size + + with threadpool_limits(limits=1, user_api="blas"): + # Perform the iterative optimization until convergence + for i in range(n_steps): + # Sample a minibatch from the full dataset + minibatch_indices = random_state.randint(0, n_samples, self._batch_size) + + # Perform the actual update step on the minibatch data + batch_inertia = _mini_batch_step( + X=X[minibatch_indices], + sample_weight=sample_weight[minibatch_indices], + centers=centers, + centers_new=centers_new, + weight_sums=self._counts, + random_state=random_state, + random_reassign=self._random_reassign(), + reassignment_ratio=self.reassignment_ratio, + verbose=self.verbose, + n_threads=self._n_threads, + ) + + if self._tol > 0.0: + centers_squared_diff = np.sum((centers_new - centers) ** 2) + else: + centers_squared_diff = 0 + + centers, centers_new = centers_new, centers + + # Monitor convergence and do early stopping if necessary + if self._mini_batch_convergence( + i, n_steps, n_samples, centers_squared_diff, batch_inertia + ): + break + + self.cluster_centers_ = centers + 
self._n_features_out = self.cluster_centers_.shape[0] + + self.n_steps_ = i + 1 + self.n_iter_ = int(np.ceil(((i + 1) * self._batch_size) / n_samples)) + + if self.compute_labels: + self.labels_, self.inertia_ = _labels_inertia_threadpool_limit( + X, + sample_weight, + self.cluster_centers_, + n_threads=self._n_threads, + ) + else: + self.inertia_ = self._ewa_inertia * n_samples + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None, sample_weight=None): + """Update k means estimate on a single mini-batch X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training instances to cluster. It must be noted that the data + will be converted to C ordering, which will cause a memory copy + if the given data is not C-contiguous. + If a sparse matrix is passed, a copy will be made if it's not in + CSR format. + + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in X. If None, all observations + are assigned equal weight. `sample_weight` is not used during + initialization if `init` is a callable or a user provided array. + + Returns + ------- + self : object + Return updated estimator. + """ + has_centers = hasattr(self, "cluster_centers_") + + X = self._validate_data( + X, + accept_sparse="csr", + dtype=[np.float64, np.float32], + order="C", + accept_large_sparse=False, + reset=not has_centers, + ) + + self._random_state = getattr( + self, "_random_state", check_random_state(self.random_state) + ) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + self.n_steps_ = getattr(self, "n_steps_", 0) + + # precompute squared norms of data points + x_squared_norms = row_norms(X, squared=True) + + if not has_centers: + # this instance has not been fitted yet (fit or partial_fit) + self._check_params_vs_input(X) + self._n_threads = _openmp_effective_n_threads() + + # Validate init array + init = self.init + if _is_arraylike_not_scalar(init): + init = check_array(init, dtype=X.dtype, copy=True, order="C") + self._validate_center_shape(X, init) + + self._check_mkl_vcomp(X, X.shape[0]) + + # initialize the cluster centers + self.cluster_centers_ = self._init_centroids( + X, + x_squared_norms=x_squared_norms, + init=init, + random_state=self._random_state, + init_size=self._init_size, + sample_weight=sample_weight, + ) + + # Initialize counts + self._counts = np.zeros(self.n_clusters, dtype=X.dtype) + + # Initialize number of samples seen since last reassignment + self._n_since_last_reassign = 0 + + with threadpool_limits(limits=1, user_api="blas"): + _mini_batch_step( + X, + sample_weight=sample_weight, + centers=self.cluster_centers_, + centers_new=self.cluster_centers_, + weight_sums=self._counts, + random_state=self._random_state, + random_reassign=self._random_reassign(), + reassignment_ratio=self.reassignment_ratio, + verbose=self.verbose, + n_threads=self._n_threads, + ) + + if self.compute_labels: + self.labels_, self.inertia_ = _labels_inertia_threadpool_limit( + X, + sample_weight, + self.cluster_centers_, + n_threads=self._n_threads, + ) + + self.n_steps_ += 1 + self._n_features_out = self.cluster_centers_.shape[0] + + return self diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_mean_shift.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_mean_shift.py new file mode 100644 index 
0000000000000000000000000000000000000000..fae11cca7df233963c7de73f42f5706ef7caf4c7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_mean_shift.py @@ -0,0 +1,575 @@ +"""Mean shift clustering algorithm. + +Mean shift clustering aims to discover *blobs* in a smooth density of +samples. It is a centroid based algorithm, which works by updating candidates +for centroids to be the mean of the points within a given region. These +candidates are then filtered in a post-processing stage to eliminate +near-duplicates to form the final set of centroids. + +Seeding is performed using a binning technique for scalability. +""" + +# Authors: Conrad Lee +# Alexandre Gramfort +# Gael Varoquaux +# Martino Sorbaro + +import warnings +from collections import defaultdict +from numbers import Integral, Real + +import numpy as np + +from .._config import config_context +from ..base import BaseEstimator, ClusterMixin, _fit_context +from ..metrics.pairwise import pairwise_distances_argmin +from ..neighbors import NearestNeighbors +from ..utils import check_array, check_random_state, gen_batches +from ..utils._param_validation import Interval, validate_params +from ..utils.parallel import Parallel, delayed +from ..utils.validation import check_is_fitted + + +@validate_params( + { + "X": ["array-like"], + "quantile": [Interval(Real, 0, 1, closed="both")], + "n_samples": [Interval(Integral, 1, None, closed="left"), None], + "random_state": ["random_state"], + "n_jobs": [Integral, None], + }, + prefer_skip_nested_validation=True, +) +def estimate_bandwidth(X, *, quantile=0.3, n_samples=None, random_state=0, n_jobs=None): + """Estimate the bandwidth to use with the mean-shift algorithm. + + This function takes time at least quadratic in `n_samples`. For large + datasets, it is wise to subsample by setting `n_samples`. Alternatively, + the parameter `bandwidth` can be set to a small value without estimating + it. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input points. + + quantile : float, default=0.3 + Should be between [0, 1] + 0.5 means that the median of all pairwise distances is used. + + n_samples : int, default=None + The number of samples to use. If not given, all samples are used. + + random_state : int, RandomState instance, default=None + The generator used to randomly select the samples from input points + for bandwidth estimation. Use an int to make the randomness + deterministic. + See :term:`Glossary `. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Returns + ------- + bandwidth : float + The bandwidth parameter. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.cluster import estimate_bandwidth + >>> X = np.array([[1, 1], [2, 1], [1, 0], + ... [4, 7], [3, 5], [3, 6]]) + >>> estimate_bandwidth(X, quantile=0.5) + 1.61... 
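[Editor's note] A short pipeline sketch, not part of this diff, showing the subsampling advice above in practice: estimate the bandwidth on a subset, then reuse it for MeanShift. The data and parameter values are illustrative assumptions:

import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 0.5, size=(50, 2)),
               rng.normal(5, 0.5, size=(50, 2))])

# Estimate on a 50-sample subset to keep the quadratic cost in check.
bw = estimate_bandwidth(X, quantile=0.3, n_samples=50, random_state=0)
ms = MeanShift(bandwidth=bw, bin_seeding=True).fit(X)
# ms.cluster_centers_ should hold roughly one center per well-separated blob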
+ """ + X = check_array(X) + + random_state = check_random_state(random_state) + if n_samples is not None: + idx = random_state.permutation(X.shape[0])[:n_samples] + X = X[idx] + n_neighbors = int(X.shape[0] * quantile) + if n_neighbors < 1: # cannot fit NearestNeighbors with n_neighbors = 0 + n_neighbors = 1 + nbrs = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=n_jobs) + nbrs.fit(X) + + bandwidth = 0.0 + for batch in gen_batches(len(X), 500): + d, _ = nbrs.kneighbors(X[batch, :], return_distance=True) + bandwidth += np.max(d, axis=1).sum() + + return bandwidth / X.shape[0] + + +# separate function for each seed's iterative loop +def _mean_shift_single_seed(my_mean, X, nbrs, max_iter): + # For each seed, climb gradient until convergence or max_iter + bandwidth = nbrs.get_params()["radius"] + stop_thresh = 1e-3 * bandwidth # when mean has converged + completed_iterations = 0 + while True: + # Find mean of points within bandwidth + i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth, return_distance=False)[0] + points_within = X[i_nbrs] + if len(points_within) == 0: + break # Depending on seeding strategy this condition may occur + my_old_mean = my_mean # save the old mean + my_mean = np.mean(points_within, axis=0) + # If converged or at max_iter, adds the cluster + if ( + np.linalg.norm(my_mean - my_old_mean) < stop_thresh + or completed_iterations == max_iter + ): + break + completed_iterations += 1 + return tuple(my_mean), len(points_within), completed_iterations + + +@validate_params( + {"X": ["array-like"]}, + prefer_skip_nested_validation=False, +) +def mean_shift( + X, + *, + bandwidth=None, + seeds=None, + bin_seeding=False, + min_bin_freq=1, + cluster_all=True, + max_iter=300, + n_jobs=None, +): + """Perform mean shift clustering of data using a flat kernel. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + + X : array-like of shape (n_samples, n_features) + Input data. + + bandwidth : float, default=None + Kernel bandwidth. If not None, must be in the range [0, +inf). + + If None, the bandwidth is determined using a heuristic based on + the median of all pairwise distances. This will take quadratic time in + the number of samples. The sklearn.cluster.estimate_bandwidth function + can be used to do this more efficiently. + + seeds : array-like of shape (n_seeds, n_features) or None + Point used as initial kernel locations. If None and bin_seeding=False, + each data point is used as a seed. If None and bin_seeding=True, + see bin_seeding. + + bin_seeding : bool, default=False + If true, initial kernel locations are not locations of all + points, but rather the location of the discretized version of + points, where points are binned onto a grid whose coarseness + corresponds to the bandwidth. Setting this option to True will speed + up the algorithm because fewer seeds will be initialized. + Ignored if seeds argument is not None. + + min_bin_freq : int, default=1 + To speed up the algorithm, accept only those bins with at least + min_bin_freq points as seeds. + + cluster_all : bool, default=True + If true, then all points are clustered, even those orphans that are + not within any kernel. Orphans are assigned to the nearest kernel. + If false, then orphans are given cluster label -1. + + max_iter : int, default=300 + Maximum number of iterations, per seed point before the clustering + operation terminates (for that seed point), if has not converged yet. + + n_jobs : int, default=None + The number of jobs to use for the computation. 
The following tasks benefit + from the parallelization: + + - The search of nearest neighbors for bandwidth estimation and label + assignments. See the details in the docstring of the + ``NearestNeighbors`` class. + - Hill-climbing optimization for all seeds. + + See :term:`Glossary ` for more details. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionadded:: 0.17 + Parallel Execution using *n_jobs*. + + Returns + ------- + + cluster_centers : ndarray of shape (n_clusters, n_features) + Coordinates of cluster centers. + + labels : ndarray of shape (n_samples,) + Cluster labels for each point. + + Notes + ----- + For an example, see :ref:`examples/cluster/plot_mean_shift.py + `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.cluster import mean_shift + >>> X = np.array([[1, 1], [2, 1], [1, 0], + ... [4, 7], [3, 5], [3, 6]]) + >>> cluster_centers, labels = mean_shift(X, bandwidth=2) + >>> cluster_centers + array([[3.33..., 6. ], + [1.33..., 0.66...]]) + >>> labels + array([1, 1, 1, 0, 0, 0]) + """ + model = MeanShift( + bandwidth=bandwidth, + seeds=seeds, + min_bin_freq=min_bin_freq, + bin_seeding=bin_seeding, + cluster_all=cluster_all, + n_jobs=n_jobs, + max_iter=max_iter, + ).fit(X) + return model.cluster_centers_, model.labels_ + + +def get_bin_seeds(X, bin_size, min_bin_freq=1): + """Find seeds for mean_shift. + + Finds seeds by first binning data onto a grid whose lines are + spaced bin_size apart, and then choosing those bins with at least + min_bin_freq points. + + Parameters + ---------- + + X : array-like of shape (n_samples, n_features) + Input points, the same points that will be used in mean_shift. + + bin_size : float + Controls the coarseness of the binning. Smaller values lead + to more seeding (which is computationally more expensive). If you're + not sure how to set this, set it to the value of the bandwidth used + in clustering.mean_shift. + + min_bin_freq : int, default=1 + Only bins with at least min_bin_freq will be selected as seeds. + Raising this value decreases the number of seeds found, which + makes mean_shift computationally cheaper. + + Returns + ------- + bin_seeds : array-like of shape (n_samples, n_features) + Points used as initial kernel positions in clustering.mean_shift. + """ + if bin_size == 0: + return X + + # Bin points + bin_sizes = defaultdict(int) + for point in X: + binned_point = np.round(point / bin_size) + bin_sizes[tuple(binned_point)] += 1 + + # Select only those bins as seeds which have enough members + bin_seeds = np.array( + [point for point, freq in bin_sizes.items() if freq >= min_bin_freq], + dtype=np.float32, + ) + if len(bin_seeds) == len(X): + warnings.warn( + "Binning data failed with provided bin_size=%f, using data points as seeds." + % bin_size + ) + return X + bin_seeds = bin_seeds * bin_size + return bin_seeds + + +class MeanShift(ClusterMixin, BaseEstimator): + """Mean shift clustering using a flat kernel. + + Mean shift clustering aims to discover "blobs" in a smooth density of + samples. It is a centroid-based algorithm, which works by updating + candidates for centroids to be the mean of the points within a given + region. These candidates are then filtered in a post-processing stage to + eliminate near-duplicates to form the final set of centroids. + + Seeding is performed using a binning technique for scalability. + + Read more in the :ref:`User Guide `. 
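[Editor's note] The grid-based seeding performed by `get_bin_seeds` above can be shown with a toy sketch, not part of this diff: points are snapped to a grid of spacing `bin_size` and only bins holding at least `min_bin_freq` points survive as seeds.

import numpy as np
from sklearn.cluster import get_bin_seeds

X = np.array([[1.0, 1.0], [1.1, 0.9], [1.2, 1.1], [5.0, 5.0]])

# Three points share the bin around (1, 1); the lone point at (5, 5) does not
# reach min_bin_freq, so a single seed is returned.
seeds = get_bin_seeds(X, bin_size=1.0, min_bin_freq=2)
# -> array([[1., 1.]])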
+ + Parameters + ---------- + bandwidth : float, default=None + Bandwidth used in the flat kernel. + + If not given, the bandwidth is estimated using + sklearn.cluster.estimate_bandwidth; see the documentation for that + function for hints on scalability (see also the Notes, below). + + seeds : array-like of shape (n_samples, n_features), default=None + Seeds used to initialize kernels. If not set, + the seeds are calculated by clustering.get_bin_seeds + with bandwidth as the grid size and default values for + other parameters. + + bin_seeding : bool, default=False + If true, initial kernel locations are not locations of all + points, but rather the location of the discretized version of + points, where points are binned onto a grid whose coarseness + corresponds to the bandwidth. Setting this option to True will speed + up the algorithm because fewer seeds will be initialized. + The default value is False. + Ignored if seeds argument is not None. + + min_bin_freq : int, default=1 + To speed up the algorithm, accept only those bins with at least + min_bin_freq points as seeds. + + cluster_all : bool, default=True + If true, then all points are clustered, even those orphans that are + not within any kernel. Orphans are assigned to the nearest kernel. + If false, then orphans are given cluster label -1. + + n_jobs : int, default=None + The number of jobs to use for the computation. The following tasks benefit + from the parallelization: + + - The search of nearest neighbors for bandwidth estimation and label + assignments. See the details in the docstring of the + ``NearestNeighbors`` class. + - Hill-climbing optimization for all seeds. + + See :term:`Glossary ` for more details. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + max_iter : int, default=300 + Maximum number of iterations, per seed point before the clustering + operation terminates (for that seed point), if has not converged yet. + + .. versionadded:: 0.22 + + Attributes + ---------- + cluster_centers_ : ndarray of shape (n_clusters, n_features) + Coordinates of cluster centers. + + labels_ : ndarray of shape (n_samples,) + Labels of each point. + + n_iter_ : int + Maximum number of iterations performed on each seed. + + .. versionadded:: 0.22 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + KMeans : K-Means clustering. + + Notes + ----- + + Scalability: + + Because this implementation uses a flat kernel and + a Ball Tree to look up members of each kernel, the complexity will tend + towards O(T*n*log(n)) in lower dimensions, with n the number of samples + and T the number of points. In higher dimensions the complexity will + tend towards O(T*n^2). + + Scalability can be boosted by using fewer seeds, for example by using + a higher value of min_bin_freq in the get_bin_seeds function. + + Note that the estimate_bandwidth function is much less scalable than the + mean shift algorithm and will be the bottleneck if it is used. + + References + ---------- + + Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward + feature space analysis". IEEE Transactions on Pattern Analysis and + Machine Intelligence. 2002. pp. 603-619. 
+ + Examples + -------- + >>> from sklearn.cluster import MeanShift + >>> import numpy as np + >>> X = np.array([[1, 1], [2, 1], [1, 0], + ... [4, 7], [3, 5], [3, 6]]) + >>> clustering = MeanShift(bandwidth=2).fit(X) + >>> clustering.labels_ + array([1, 1, 1, 0, 0, 0]) + >>> clustering.predict([[0, 0], [5, 5]]) + array([1, 0]) + >>> clustering + MeanShift(bandwidth=2) + """ + + _parameter_constraints: dict = { + "bandwidth": [Interval(Real, 0, None, closed="neither"), None], + "seeds": ["array-like", None], + "bin_seeding": ["boolean"], + "min_bin_freq": [Interval(Integral, 1, None, closed="left")], + "cluster_all": ["boolean"], + "n_jobs": [Integral, None], + "max_iter": [Interval(Integral, 0, None, closed="left")], + } + + def __init__( + self, + *, + bandwidth=None, + seeds=None, + bin_seeding=False, + min_bin_freq=1, + cluster_all=True, + n_jobs=None, + max_iter=300, + ): + self.bandwidth = bandwidth + self.seeds = seeds + self.bin_seeding = bin_seeding + self.cluster_all = cluster_all + self.min_bin_freq = min_bin_freq + self.n_jobs = n_jobs + self.max_iter = max_iter + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Perform clustering. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Samples to cluster. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Fitted instance. + """ + X = self._validate_data(X) + bandwidth = self.bandwidth + if bandwidth is None: + bandwidth = estimate_bandwidth(X, n_jobs=self.n_jobs) + + seeds = self.seeds + if seeds is None: + if self.bin_seeding: + seeds = get_bin_seeds(X, bandwidth, self.min_bin_freq) + else: + seeds = X + n_samples, n_features = X.shape + center_intensity_dict = {} + + # We use n_jobs=1 because this will be used in nested calls under + # parallel calls to _mean_shift_single_seed so there is no need for + # for further parallelism. + nbrs = NearestNeighbors(radius=bandwidth, n_jobs=1).fit(X) + + # execute iterations on all seeds in parallel + all_res = Parallel(n_jobs=self.n_jobs)( + delayed(_mean_shift_single_seed)(seed, X, nbrs, self.max_iter) + for seed in seeds + ) + # copy results in a dictionary + for i in range(len(seeds)): + if all_res[i][1]: # i.e. len(points_within) > 0 + center_intensity_dict[all_res[i][0]] = all_res[i][1] + + self.n_iter_ = max([x[2] for x in all_res]) + + if not center_intensity_dict: + # nothing near seeds + raise ValueError( + "No point was within bandwidth=%f of any seed. Try a different seeding" + " strategy or increase the bandwidth." + % bandwidth + ) + + # POST PROCESSING: remove near duplicate points + # If the distance between two kernels is less than the bandwidth, + # then we have to remove one because it is a duplicate. Remove the + # one with fewer points. 
+ + sorted_by_intensity = sorted( + center_intensity_dict.items(), + key=lambda tup: (tup[1], tup[0]), + reverse=True, + ) + sorted_centers = np.array([tup[0] for tup in sorted_by_intensity]) + unique = np.ones(len(sorted_centers), dtype=bool) + nbrs = NearestNeighbors(radius=bandwidth, n_jobs=self.n_jobs).fit( + sorted_centers + ) + for i, center in enumerate(sorted_centers): + if unique[i]: + neighbor_idxs = nbrs.radius_neighbors([center], return_distance=False)[ + 0 + ] + unique[neighbor_idxs] = 0 + unique[i] = 1 # leave the current point as unique + cluster_centers = sorted_centers[unique] + + # ASSIGN LABELS: a point belongs to the cluster that it is closest to + nbrs = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs).fit(cluster_centers) + labels = np.zeros(n_samples, dtype=int) + distances, idxs = nbrs.kneighbors(X) + if self.cluster_all: + labels = idxs.flatten() + else: + labels.fill(-1) + bool_selector = distances.flatten() <= bandwidth + labels[bool_selector] = idxs.flatten()[bool_selector] + + self.cluster_centers_, self.labels_ = cluster_centers, labels + return self + + def predict(self, X): + """Predict the closest cluster each sample in X belongs to. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + New data to predict. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Index of the cluster each sample belongs to. + """ + check_is_fitted(self) + X = self._validate_data(X, reset=False) + with config_context(assume_finite=True): + return pairwise_distances_argmin(X, self.cluster_centers_) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_optics.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_optics.py new file mode 100644 index 0000000000000000000000000000000000000000..493b7f40389cb410ae3f4f456c783440330c438f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_optics.py @@ -0,0 +1,1199 @@ +"""Ordering Points To Identify the Clustering Structure (OPTICS) + +These routines execute the OPTICS algorithm, and implement various +cluster extraction methods of the ordered list. + +Authors: Shane Grigsby + Adrin Jalali + Erich Schubert + Hanmin Qin +License: BSD 3 clause +""" + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy.sparse import SparseEfficiencyWarning, issparse + +from ..base import BaseEstimator, ClusterMixin, _fit_context +from ..exceptions import DataConversionWarning +from ..metrics import pairwise_distances +from ..metrics.pairwise import _VALID_METRICS, PAIRWISE_BOOLEAN_FUNCTIONS +from ..neighbors import NearestNeighbors +from ..utils import gen_batches, get_chunk_n_rows +from ..utils._param_validation import ( + HasMethods, + Interval, + RealNotInt, + StrOptions, + validate_params, +) +from ..utils.validation import check_memory + + +class OPTICS(ClusterMixin, BaseEstimator): + """Estimate clustering structure from vector array. + + OPTICS (Ordering Points To Identify the Clustering Structure), closely + related to DBSCAN, finds core sample of high density and expands clusters + from them [1]_. Unlike DBSCAN, keeps cluster hierarchy for a variable + neighborhood radius. Better suited for usage on large datasets than the + current sklearn implementation of DBSCAN. + + Clusters are then extracted using a DBSCAN-like method + (cluster_method = 'dbscan') or an automatic + technique proposed in [1]_ (cluster_method = 'xi'). 
+ + This implementation deviates from the original OPTICS by first performing + k-nearest-neighborhood searches on all points to identify core sizes, then + computing only the distances to unprocessed points when constructing the + cluster order. Note that we do not employ a heap to manage the expansion + candidates, so the time complexity will be O(n^2). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + min_samples : int > 1 or float between 0 and 1, default=5 + The number of samples in a neighborhood for a point to be considered as + a core point. Also, up and down steep regions can't have more than + ``min_samples`` consecutive non-steep points. Expressed as an absolute + number or a fraction of the number of samples (rounded to be at least + 2). + + max_eps : float, default=np.inf + The maximum distance between two samples for one to be considered as + in the neighborhood of the other. Default value of ``np.inf`` will + identify clusters across all scales; reducing ``max_eps`` will result + in shorter run times. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Any metric from scikit-learn + or scipy.spatial.distance can be used. + + If metric is a callable function, it is called on each + pair of instances (rows) and the resulting value recorded. The callable + should take two arrays as input and return one value indicating the + distance between them. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. If metric is + "precomputed", `X` is assumed to be a distance matrix and must be + square. + + Valid values for metric are: + + - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', + 'manhattan'] + + - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', + 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', + 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao', + 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', + 'yule'] + + Sparse matrices are only supported by scikit-learn metrics. + See the documentation for scipy.spatial.distance for details on these + metrics. + + .. note:: + `'kulsinski'` is deprecated from SciPy 1.9 and will removed in SciPy 1.11. + + p : float, default=2 + Parameter for the Minkowski metric from + :class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + cluster_method : str, default='xi' + The extraction method used to extract clusters using the calculated + reachability and ordering. Possible values are "xi" and "dbscan". + + eps : float, default=None + The maximum distance between two samples for one to be considered as + in the neighborhood of the other. By default it assumes the same value + as ``max_eps``. + Used only when ``cluster_method='dbscan'``. + + xi : float between 0 and 1, default=0.05 + Determines the minimum steepness on the reachability plot that + constitutes a cluster boundary. For example, an upwards point in the + reachability plot is defined by the ratio from one point to its + successor being at most 1-xi. + Used only when ``cluster_method='xi'``. + + predecessor_correction : bool, default=True + Correct clusters according to the predecessors calculated by OPTICS + [2]_. This parameter has minimal effect on most datasets. 
+ Used only when ``cluster_method='xi'``. + + min_cluster_size : int > 1 or float between 0 and 1, default=None + Minimum number of samples in an OPTICS cluster, expressed as an + absolute number or a fraction of the number of samples (rounded to be + at least 2). If ``None``, the value of ``min_samples`` is used instead. + Used only when ``cluster_method='xi'``. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`~sklearn.neighbors.BallTree`. + - 'kd_tree' will use :class:`~sklearn.neighbors.KDTree`. + - 'brute' will use a brute-force search. + - 'auto' (default) will attempt to decide the most appropriate + algorithm based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to :class:`~sklearn.neighbors.BallTree` or + :class:`~sklearn.neighbors.KDTree`. This can affect the speed of the + construction and query, as well as the memory required to store the + tree. The optimal value depends on the nature of the problem. + + memory : str or object with the joblib.Memory interface, default=None + Used to cache the output of the computation of the tree. + By default, no caching is done. If a string is given, it is the + path to the caching directory. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + labels_ : ndarray of shape (n_samples,) + Cluster labels for each point in the dataset given to fit(). + Noisy samples and points which are not included in a leaf cluster + of ``cluster_hierarchy_`` are labeled as -1. + + reachability_ : ndarray of shape (n_samples,) + Reachability distances per sample, indexed by object order. Use + ``clust.reachability_[clust.ordering_]`` to access in cluster order. + + ordering_ : ndarray of shape (n_samples,) + The cluster ordered list of sample indices. + + core_distances_ : ndarray of shape (n_samples,) + Distance at which each sample becomes a core point, indexed by object + order. Points which will never be core have a distance of inf. Use + ``clust.core_distances_[clust.ordering_]`` to access in cluster order. + + predecessor_ : ndarray of shape (n_samples,) + Point that a sample was reached from, indexed by object order. + Seed points have a predecessor of -1. + + cluster_hierarchy_ : ndarray of shape (n_clusters, 2) + The list of clusters in the form of ``[start, end]`` in each row, with + all indices inclusive. The clusters are ordered according to + ``(end, -start)`` (ascending) so that larger clusters encompassing + smaller clusters come after those smaller ones. Since ``labels_`` does + not reflect the hierarchy, usually + ``len(cluster_hierarchy_) > np.unique(optics.labels_)``. Please also + note that these indices are of the ``ordering_``, i.e. + ``X[ordering_][start:end + 1]`` form a cluster. + Only available when ``cluster_method='xi'``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. 
versionadded:: 1.0 + + See Also + -------- + DBSCAN : A similar clustering for a specified neighborhood radius (eps). + Our implementation is optimized for runtime. + + References + ---------- + .. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel, + and Jörg Sander. "OPTICS: ordering points to identify the clustering + structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60. + + .. [2] Schubert, Erich, Michael Gertz. + "Improving the Cluster Structure Extracted from OPTICS Plots." Proc. of + the Conference "Lernen, Wissen, Daten, Analysen" (LWDA) (2018): 318-329. + + Examples + -------- + >>> from sklearn.cluster import OPTICS + >>> import numpy as np + >>> X = np.array([[1, 2], [2, 5], [3, 6], + ... [8, 7], [8, 8], [7, 3]]) + >>> clustering = OPTICS(min_samples=2).fit(X) + >>> clustering.labels_ + array([0, 0, 0, 1, 1, 1]) + + For a more detailed example see + :ref:`sphx_glr_auto_examples_cluster_plot_optics.py`. + """ + + _parameter_constraints: dict = { + "min_samples": [ + Interval(Integral, 2, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="both"), + ], + "max_eps": [Interval(Real, 0, None, closed="both")], + "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable], + "p": [Interval(Real, 1, None, closed="left")], + "metric_params": [dict, None], + "cluster_method": [StrOptions({"dbscan", "xi"})], + "eps": [Interval(Real, 0, None, closed="both"), None], + "xi": [Interval(Real, 0, 1, closed="both")], + "predecessor_correction": ["boolean"], + "min_cluster_size": [ + Interval(Integral, 2, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="right"), + None, + ], + "algorithm": [StrOptions({"auto", "brute", "ball_tree", "kd_tree"})], + "leaf_size": [Interval(Integral, 1, None, closed="left")], + "memory": [str, HasMethods("cache"), None], + "n_jobs": [Integral, None], + } + + def __init__( + self, + *, + min_samples=5, + max_eps=np.inf, + metric="minkowski", + p=2, + metric_params=None, + cluster_method="xi", + eps=None, + xi=0.05, + predecessor_correction=True, + min_cluster_size=None, + algorithm="auto", + leaf_size=30, + memory=None, + n_jobs=None, + ): + self.max_eps = max_eps + self.min_samples = min_samples + self.min_cluster_size = min_cluster_size + self.algorithm = algorithm + self.metric = metric + self.metric_params = metric_params + self.p = p + self.leaf_size = leaf_size + self.cluster_method = cluster_method + self.eps = eps + self.xi = xi + self.predecessor_correction = predecessor_correction + self.memory = memory + self.n_jobs = n_jobs + + @_fit_context( + # Optics.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Perform OPTICS clustering. + + Extracts an ordered list of points and reachability distances, and + performs initial clustering using ``max_eps`` distance specified at + OPTICS object instantiation. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features), or \ + (n_samples, n_samples) if metric='precomputed' + A feature array, or array of distances between samples if + metric='precomputed'. If a sparse matrix is provided, it will be + converted into CSR format. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns a fitted instance of self. 
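Since ``fit`` accepts either a feature array or a square distance matrix, the precomputed path can be sketched as follows (a minimal illustration; the points are the same toy data used in the class docstring):

    import numpy as np
    from sklearn.cluster import OPTICS
    from sklearn.metrics import pairwise_distances

    X = np.array([[1, 2], [2, 5], [3, 6],
                  [8, 7], [8, 8], [7, 3]], dtype=float)
    D = pairwise_distances(X)  # dense, square, symmetric distance matrix
    clust = OPTICS(min_samples=2, metric="precomputed").fit(D)
    # clust.labels_ should match fitting on X directly with the default metric.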
+ """ + dtype = bool if self.metric in PAIRWISE_BOOLEAN_FUNCTIONS else float + if dtype == bool and X.dtype != bool: + msg = ( + "Data will be converted to boolean for" + f" metric {self.metric}, to avoid this warning," + " you may convert the data prior to calling fit." + ) + warnings.warn(msg, DataConversionWarning) + + X = self._validate_data(X, dtype=dtype, accept_sparse="csr") + if self.metric == "precomputed" and issparse(X): + with warnings.catch_warnings(): + warnings.simplefilter("ignore", SparseEfficiencyWarning) + # Set each diagonal to an explicit value so each point is its + # own neighbor + X.setdiag(X.diagonal()) + memory = check_memory(self.memory) + + ( + self.ordering_, + self.core_distances_, + self.reachability_, + self.predecessor_, + ) = memory.cache(compute_optics_graph)( + X=X, + min_samples=self.min_samples, + algorithm=self.algorithm, + leaf_size=self.leaf_size, + metric=self.metric, + metric_params=self.metric_params, + p=self.p, + n_jobs=self.n_jobs, + max_eps=self.max_eps, + ) + + # Extract clusters from the calculated orders and reachability + if self.cluster_method == "xi": + labels_, clusters_ = cluster_optics_xi( + reachability=self.reachability_, + predecessor=self.predecessor_, + ordering=self.ordering_, + min_samples=self.min_samples, + min_cluster_size=self.min_cluster_size, + xi=self.xi, + predecessor_correction=self.predecessor_correction, + ) + self.cluster_hierarchy_ = clusters_ + elif self.cluster_method == "dbscan": + if self.eps is None: + eps = self.max_eps + else: + eps = self.eps + + if eps > self.max_eps: + raise ValueError( + "Specify an epsilon smaller than %s. Got %s." % (self.max_eps, eps) + ) + + labels_ = cluster_optics_dbscan( + reachability=self.reachability_, + core_distances=self.core_distances_, + ordering=self.ordering_, + eps=eps, + ) + + self.labels_ = labels_ + return self + + +def _validate_size(size, n_samples, param_name): + if size > n_samples: + raise ValueError( + "%s must be no greater than the number of samples (%d). Got %d" + % (param_name, n_samples, size) + ) + + +# OPTICS helper functions +def _compute_core_distances_(X, neighbors, min_samples, working_memory): + """Compute the k-th nearest neighbor of each sample. + + Equivalent to neighbors.kneighbors(X, self.min_samples)[0][:, -1] + but with more memory efficiency. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data. + neighbors : NearestNeighbors instance + The fitted nearest neighbors estimator. + working_memory : int, default=None + The sought maximum memory for temporary distance matrix chunks. + When None (default), the value of + ``sklearn.get_config()['working_memory']`` is used. + + Returns + ------- + core_distances : ndarray of shape (n_samples,) + Distance at which each sample becomes a core point. + Points which will never be core have a distance of inf. 
+ """ + n_samples = X.shape[0] + core_distances = np.empty(n_samples) + core_distances.fill(np.nan) + + chunk_n_rows = get_chunk_n_rows( + row_bytes=16 * min_samples, max_n_rows=n_samples, working_memory=working_memory + ) + slices = gen_batches(n_samples, chunk_n_rows) + for sl in slices: + core_distances[sl] = neighbors.kneighbors(X[sl], min_samples)[0][:, -1] + return core_distances + + +@validate_params( + { + "X": [np.ndarray, "sparse matrix"], + "min_samples": [ + Interval(Integral, 2, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="both"), + ], + "max_eps": [Interval(Real, 0, None, closed="both")], + "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable], + "p": [Interval(Real, 0, None, closed="right"), None], + "metric_params": [dict, None], + "algorithm": [StrOptions({"auto", "brute", "ball_tree", "kd_tree"})], + "leaf_size": [Interval(Integral, 1, None, closed="left")], + "n_jobs": [Integral, None], + }, + prefer_skip_nested_validation=False, # metric is not validated yet +) +def compute_optics_graph( + X, *, min_samples, max_eps, metric, p, metric_params, algorithm, leaf_size, n_jobs +): + """Compute the OPTICS reachability graph. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features), or \ + (n_samples, n_samples) if metric='precomputed' + A feature array, or array of distances between samples if + metric='precomputed'. + + min_samples : int > 1 or float between 0 and 1 + The number of samples in a neighborhood for a point to be considered + as a core point. Expressed as an absolute number or a fraction of the + number of samples (rounded to be at least 2). + + max_eps : float, default=np.inf + The maximum distance between two samples for one to be considered as + in the neighborhood of the other. Default value of ``np.inf`` will + identify clusters across all scales; reducing ``max_eps`` will result + in shorter run times. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Any metric from scikit-learn + or scipy.spatial.distance can be used. + + If metric is a callable function, it is called on each + pair of instances (rows) and the resulting value recorded. The callable + should take two arrays as input and return one value indicating the + distance between them. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. If metric is + "precomputed", X is assumed to be a distance matrix and must be square. + + Valid values for metric are: + + - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', + 'manhattan'] + + - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', + 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', + 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao', + 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', + 'yule'] + + See the documentation for scipy.spatial.distance for details on these + metrics. + + .. note:: + `'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11. + + p : float, default=2 + Parameter for the Minkowski metric from + :class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. 
+ + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`~sklearn.neighbors.BallTree`. + - 'kd_tree' will use :class:`~sklearn.neighbors.KDTree`. + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to `fit` method. (default) + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to :class:`~sklearn.neighbors.BallTree` or + :class:`~sklearn.neighbors.KDTree`. This can affect the speed of the + construction and query, as well as the memory required to store the + tree. The optimal value depends on the nature of the problem. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Returns + ------- + ordering_ : array of shape (n_samples,) + The cluster ordered list of sample indices. + + core_distances_ : array of shape (n_samples,) + Distance at which each sample becomes a core point, indexed by object + order. Points which will never be core have a distance of inf. Use + ``clust.core_distances_[clust.ordering_]`` to access in cluster order. + + reachability_ : array of shape (n_samples,) + Reachability distances per sample, indexed by object order. Use + ``clust.reachability_[clust.ordering_]`` to access in cluster order. + + predecessor_ : array of shape (n_samples,) + Point that a sample was reached from, indexed by object order. + Seed points have a predecessor of -1. + + References + ---------- + .. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel, + and Jörg Sander. "OPTICS: ordering points to identify the clustering + structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.cluster import compute_optics_graph + >>> X = np.array([[1, 2], [2, 5], [3, 6], + ... [8, 7], [8, 8], [7, 3]]) + >>> ordering, core_distances, reachability, predecessor = compute_optics_graph( + ... X, + ... min_samples=2, + ... max_eps=np.inf, + ... metric="minkowski", + ... p=2, + ... metric_params=None, + ... algorithm="auto", + ... leaf_size=30, + ... n_jobs=None, + ... ) + >>> ordering + array([0, 1, 2, 5, 3, 4]) + >>> core_distances + array([3.16..., 1.41..., 1.41..., 1. , 1. , + 4.12...]) + >>> reachability + array([ inf, 3.16..., 1.41..., 4.12..., 1. , + 5. ]) + >>> predecessor + array([-1, 0, 1, 5, 3, 2]) + """ + n_samples = X.shape[0] + _validate_size(min_samples, n_samples, "min_samples") + if min_samples <= 1: + min_samples = max(2, int(min_samples * n_samples)) + + # Start all points as 'unprocessed' ## + reachability_ = np.empty(n_samples) + reachability_.fill(np.inf) + predecessor_ = np.empty(n_samples, dtype=int) + predecessor_.fill(-1) + + nbrs = NearestNeighbors( + n_neighbors=min_samples, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + metric_params=metric_params, + p=p, + n_jobs=n_jobs, + ) + + nbrs.fit(X) + # Here we first do a kNN query for each point, this differs from + # the original OPTICS that only used epsilon range queries. + # TODO: handle working_memory somehow? 
+ core_distances_ = _compute_core_distances_( + X=X, neighbors=nbrs, min_samples=min_samples, working_memory=None + ) + # OPTICS puts an upper limit on these, use inf for undefined. + core_distances_[core_distances_ > max_eps] = np.inf + np.around( + core_distances_, + decimals=np.finfo(core_distances_.dtype).precision, + out=core_distances_, + ) + + # Main OPTICS loop. Not parallelizable. The order that entries are + # written to the 'ordering_' list is important! + # Note that this implementation is O(n^2) theoretically, but + # supposedly with very low constant factors. + processed = np.zeros(X.shape[0], dtype=bool) + ordering = np.zeros(X.shape[0], dtype=int) + for ordering_idx in range(X.shape[0]): + # Choose next based on smallest reachability distance + # (And prefer smaller ids on ties, possibly np.inf!) + index = np.where(processed == 0)[0] + point = index[np.argmin(reachability_[index])] + + processed[point] = True + ordering[ordering_idx] = point + if core_distances_[point] != np.inf: + _set_reach_dist( + core_distances_=core_distances_, + reachability_=reachability_, + predecessor_=predecessor_, + point_index=point, + processed=processed, + X=X, + nbrs=nbrs, + metric=metric, + metric_params=metric_params, + p=p, + max_eps=max_eps, + ) + if np.all(np.isinf(reachability_)): + warnings.warn( + ( + "All reachability values are inf. Set a larger" + " max_eps or all data will be considered outliers." + ), + UserWarning, + ) + return ordering, core_distances_, reachability_, predecessor_ + + +def _set_reach_dist( + core_distances_, + reachability_, + predecessor_, + point_index, + processed, + X, + nbrs, + metric, + metric_params, + p, + max_eps, +): + P = X[point_index : point_index + 1] + # Assume that radius_neighbors is faster without distances + # and we don't need all distances, nevertheless, this means + # we may be doing some work twice. + indices = nbrs.radius_neighbors(P, radius=max_eps, return_distance=False)[0] + + # Getting indices of neighbors that have not been processed + unproc = np.compress(~np.take(processed, indices), indices) + # Neighbors of current point are already processed. + if not unproc.size: + return + + # Only compute distances to unprocessed neighbors: + if metric == "precomputed": + dists = X[[point_index], unproc] + if isinstance(dists, np.matrix): + dists = np.asarray(dists) + dists = dists.ravel() + else: + _params = dict() if metric_params is None else metric_params.copy() + if metric == "minkowski" and "p" not in _params: + # the same logic as neighbors, p is ignored if explicitly set + # in the dict params + _params["p"] = p + dists = pairwise_distances(P, X[unproc], metric, n_jobs=None, **_params).ravel() + + rdists = np.maximum(dists, core_distances_[point_index]) + np.around(rdists, decimals=np.finfo(rdists.dtype).precision, out=rdists) + improved = np.where(rdists < np.take(reachability_, unproc)) + reachability_[unproc[improved]] = rdists[improved] + predecessor_[unproc[improved]] = point_index + + +@validate_params( + { + "reachability": [np.ndarray], + "core_distances": [np.ndarray], + "ordering": [np.ndarray], + "eps": [Interval(Real, 0, None, closed="both")], + }, + prefer_skip_nested_validation=True, +) +def cluster_optics_dbscan(*, reachability, core_distances, ordering, eps): + """Perform DBSCAN extraction for an arbitrary epsilon. + + Extracting the clusters runs in linear time. 
Note that this results in + ``labels_`` which are close to a :class:`~sklearn.cluster.DBSCAN` with + similar settings and ``eps``, only if ``eps`` is close to ``max_eps``. + + Parameters + ---------- + reachability : ndarray of shape (n_samples,) + Reachability distances calculated by OPTICS (``reachability_``). + + core_distances : ndarray of shape (n_samples,) + Distances at which points become core (``core_distances_``). + + ordering : ndarray of shape (n_samples,) + OPTICS ordered point indices (``ordering_``). + + eps : float + DBSCAN ``eps`` parameter. Must be set to < ``max_eps``. Results + will be close to DBSCAN algorithm if ``eps`` and ``max_eps`` are close + to one another. + + Returns + ------- + labels_ : array of shape (n_samples,) + The estimated labels. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.cluster import cluster_optics_dbscan, compute_optics_graph + >>> X = np.array([[1, 2], [2, 5], [3, 6], + ... [8, 7], [8, 8], [7, 3]]) + >>> ordering, core_distances, reachability, predecessor = compute_optics_graph( + ... X, + ... min_samples=2, + ... max_eps=np.inf, + ... metric="minkowski", + ... p=2, + ... metric_params=None, + ... algorithm="auto", + ... leaf_size=30, + ... n_jobs=None, + ... ) + >>> eps = 4.5 + >>> labels = cluster_optics_dbscan( + ... reachability=reachability, + ... core_distances=core_distances, + ... ordering=ordering, + ... eps=eps, + ... ) + >>> labels + array([0, 0, 0, 1, 1, 1]) + """ + n_samples = len(core_distances) + labels = np.zeros(n_samples, dtype=int) + + far_reach = reachability > eps + near_core = core_distances <= eps + labels[ordering] = np.cumsum(far_reach[ordering] & near_core[ordering]) - 1 + labels[far_reach & ~near_core] = -1 + return labels + + +@validate_params( + { + "reachability": [np.ndarray], + "predecessor": [np.ndarray], + "ordering": [np.ndarray], + "min_samples": [ + Interval(Integral, 2, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="both"), + ], + "min_cluster_size": [ + Interval(Integral, 2, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="both"), + None, + ], + "xi": [Interval(Real, 0, 1, closed="both")], + "predecessor_correction": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def cluster_optics_xi( + *, + reachability, + predecessor, + ordering, + min_samples, + min_cluster_size=None, + xi=0.05, + predecessor_correction=True, +): + """Automatically extract clusters according to the Xi-steep method. + + Parameters + ---------- + reachability : ndarray of shape (n_samples,) + Reachability distances calculated by OPTICS (`reachability_`). + + predecessor : ndarray of shape (n_samples,) + Predecessors calculated by OPTICS. + + ordering : ndarray of shape (n_samples,) + OPTICS ordered point indices (`ordering_`). + + min_samples : int > 1 or float between 0 and 1 + The same as the min_samples given to OPTICS. Up and down steep regions + can't have more then ``min_samples`` consecutive non-steep points. + Expressed as an absolute number or a fraction of the number of samples + (rounded to be at least 2). + + min_cluster_size : int > 1 or float between 0 and 1, default=None + Minimum number of samples in an OPTICS cluster, expressed as an + absolute number or a fraction of the number of samples (rounded to be + at least 2). If ``None``, the value of ``min_samples`` is used instead. + + xi : float between 0 and 1, default=0.05 + Determines the minimum steepness on the reachability plot that + constitutes a cluster boundary. 
For example, an upwards point in the + reachability plot is defined by the ratio from one point to its + successor being at most 1-xi. + + predecessor_correction : bool, default=True + Correct clusters based on the calculated predecessors. + + Returns + ------- + labels : ndarray of shape (n_samples,) + The labels assigned to samples. Points which are not included + in any cluster are labeled as -1. + + clusters : ndarray of shape (n_clusters, 2) + The list of clusters in the form of ``[start, end]`` in each row, with + all indices inclusive. The clusters are ordered according to ``(end, + -start)`` (ascending) so that larger clusters encompassing smaller + clusters come after such nested smaller clusters. Since ``labels`` does + not reflect the hierarchy, usually ``len(clusters) > + np.unique(labels)``. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.cluster import cluster_optics_xi, compute_optics_graph + >>> X = np.array([[1, 2], [2, 5], [3, 6], + ... [8, 7], [8, 8], [7, 3]]) + >>> ordering, core_distances, reachability, predecessor = compute_optics_graph( + ... X, + ... min_samples=2, + ... max_eps=np.inf, + ... metric="minkowski", + ... p=2, + ... metric_params=None, + ... algorithm="auto", + ... leaf_size=30, + ... n_jobs=None + ... ) + >>> min_samples = 2 + >>> labels, clusters = cluster_optics_xi( + ... reachability=reachability, + ... predecessor=predecessor, + ... ordering=ordering, + ... min_samples=min_samples, + ... ) + >>> labels + array([0, 0, 0, 1, 1, 1]) + >>> clusters + array([[0, 2], + [3, 5], + [0, 5]]) + """ + n_samples = len(reachability) + _validate_size(min_samples, n_samples, "min_samples") + if min_samples <= 1: + min_samples = max(2, int(min_samples * n_samples)) + if min_cluster_size is None: + min_cluster_size = min_samples + _validate_size(min_cluster_size, n_samples, "min_cluster_size") + if min_cluster_size <= 1: + min_cluster_size = max(2, int(min_cluster_size * n_samples)) + + clusters = _xi_cluster( + reachability[ordering], + predecessor[ordering], + ordering, + xi, + min_samples, + min_cluster_size, + predecessor_correction, + ) + labels = _extract_xi_labels(ordering, clusters) + return labels, clusters + + +def _extend_region(steep_point, xward_point, start, min_samples): + """Extend the area until it's maximal. + + It's the same function for both upward and downward reagions, depending on + the given input parameters. Assuming: + + - steep_{upward/downward}: bool array indicating whether a point is a + steep {upward/downward}; + - upward/downward: bool array indicating whether a point is + upward/downward; + + To extend an upward reagion, ``steep_point=steep_upward`` and + ``xward_point=downward`` are expected, and to extend a downward region, + ``steep_point=steep_downward`` and ``xward_point=upward``. + + Parameters + ---------- + steep_point : ndarray of shape (n_samples,), dtype=bool + True if the point is steep downward (upward). + + xward_point : ndarray of shape (n_samples,), dtype=bool + True if the point is an upward (respectively downward) point. + + start : int + The start of the xward region. + + min_samples : int + The same as the min_samples given to OPTICS. Up and down steep + regions can't have more then ``min_samples`` consecutive non-steep + points. + + Returns + ------- + index : int + The current index iterating over all the samples, i.e. where we are up + to in our search. + + end : int + The end of the region, which can be behind the index. The region + includes the ``end`` index. 
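The steep-point test that these helpers operate on follows directly from the xi definition above; a toy sketch with made-up reachability values:

    import numpy as np

    xi = 0.05
    r = np.array([np.inf, 3.0, 1.4, 4.1, 1.0, 5.0])  # illustrative reachability plot
    ratio = r[:-1] / r[1:]
    steep_upward = ratio <= 1 - xi            # successor is much higher
    steep_downward = ratio >= 1 / (1 - xi)    # successor is much lower
    upward = ratio < 1
    downward = ratio > 1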
+ """ + n_samples = len(steep_point) + non_xward_points = 0 + index = start + end = start + # find a maximal area + while index < n_samples: + if steep_point[index]: + non_xward_points = 0 + end = index + elif not xward_point[index]: + # it's not a steep point, but still goes up. + non_xward_points += 1 + # region should include no more than min_samples consecutive + # non steep xward points. + if non_xward_points > min_samples: + break + else: + return end + index += 1 + return end + + +def _update_filter_sdas(sdas, mib, xi_complement, reachability_plot): + """Update steep down areas (SDAs) using the new maximum in between (mib) + value, and the given complement of xi, i.e. ``1 - xi``. + """ + if np.isinf(mib): + return [] + res = [ + sda for sda in sdas if mib <= reachability_plot[sda["start"]] * xi_complement + ] + for sda in res: + sda["mib"] = max(sda["mib"], mib) + return res + + +def _correct_predecessor(reachability_plot, predecessor_plot, ordering, s, e): + """Correct for predecessors. + + Applies Algorithm 2 of [1]_. + + Input parameters are ordered by the computer OPTICS ordering. + + .. [1] Schubert, Erich, Michael Gertz. + "Improving the Cluster Structure Extracted from OPTICS Plots." Proc. of + the Conference "Lernen, Wissen, Daten, Analysen" (LWDA) (2018): 318-329. + """ + while s < e: + if reachability_plot[s] > reachability_plot[e]: + return s, e + p_e = predecessor_plot[e] + for i in range(s, e): + if p_e == ordering[i]: + return s, e + e -= 1 + return None, None + + +def _xi_cluster( + reachability_plot, + predecessor_plot, + ordering, + xi, + min_samples, + min_cluster_size, + predecessor_correction, +): + """Automatically extract clusters according to the Xi-steep method. + + This is rouphly an implementation of Figure 19 of the OPTICS paper. + + Parameters + ---------- + reachability_plot : array-like of shape (n_samples,) + The reachability plot, i.e. reachability ordered according to + the calculated ordering, all computed by OPTICS. + + predecessor_plot : array-like of shape (n_samples,) + Predecessors ordered according to the calculated ordering. + + xi : float, between 0 and 1 + Determines the minimum steepness on the reachability plot that + constitutes a cluster boundary. For example, an upwards point in the + reachability plot is defined by the ratio from one point to its + successor being at most 1-xi. + + min_samples : int > 1 + The same as the min_samples given to OPTICS. Up and down steep regions + can't have more then ``min_samples`` consecutive non-steep points. + + min_cluster_size : int > 1 + Minimum number of samples in an OPTICS cluster. + + predecessor_correction : bool + Correct clusters based on the calculated predecessors. + + Returns + ------- + clusters : ndarray of shape (n_clusters, 2) + The list of clusters in the form of [start, end] in each row, with all + indices inclusive. The clusters are ordered in a way that larger + clusters encompassing smaller clusters come after those smaller + clusters. + """ + + # Our implementation adds an inf to the end of reachability plot + # this helps to find potential clusters at the end of the + # reachability plot even if there's no upward region at the end of it. 
+ reachability_plot = np.hstack((reachability_plot, np.inf)) + + xi_complement = 1 - xi + sdas = [] # steep down areas, introduced in section 4.3.2 of the paper + clusters = [] + index = 0 + mib = 0.0 # maximum in between, section 4.3.2 + + # Our implementation corrects a mistake in the original + # paper, i.e., in Definition 9 steep downward point, + # r(p) * (1 - x1) <= r(p + 1) should be + # r(p) * (1 - x1) >= r(p + 1) + with np.errstate(invalid="ignore"): + ratio = reachability_plot[:-1] / reachability_plot[1:] + steep_upward = ratio <= xi_complement + steep_downward = ratio >= 1 / xi_complement + downward = ratio > 1 + upward = ratio < 1 + + # the following loop is almost exactly as Figure 19 of the paper. + # it jumps over the areas which are not either steep down or up areas + for steep_index in iter(np.flatnonzero(steep_upward | steep_downward)): + # just continue if steep_index has been a part of a discovered xward + # area. + if steep_index < index: + continue + + mib = max(mib, np.max(reachability_plot[index : steep_index + 1])) + + # steep downward areas + if steep_downward[steep_index]: + sdas = _update_filter_sdas(sdas, mib, xi_complement, reachability_plot) + D_start = steep_index + D_end = _extend_region(steep_downward, upward, D_start, min_samples) + D = {"start": D_start, "end": D_end, "mib": 0.0} + sdas.append(D) + index = D_end + 1 + mib = reachability_plot[index] + + # steep upward areas + else: + sdas = _update_filter_sdas(sdas, mib, xi_complement, reachability_plot) + U_start = steep_index + U_end = _extend_region(steep_upward, downward, U_start, min_samples) + index = U_end + 1 + mib = reachability_plot[index] + + U_clusters = [] + for D in sdas: + c_start = D["start"] + c_end = U_end + + # line (**), sc2* + if reachability_plot[c_end + 1] * xi_complement < D["mib"]: + continue + + # Definition 11: criterion 4 + D_max = reachability_plot[D["start"]] + if D_max * xi_complement >= reachability_plot[c_end + 1]: + # Find the first index from the left side which is almost + # at the same level as the end of the detected cluster. + while ( + reachability_plot[c_start + 1] > reachability_plot[c_end + 1] + and c_start < D["end"] + ): + c_start += 1 + elif reachability_plot[c_end + 1] * xi_complement >= D_max: + # Find the first index from the right side which is almost + # at the same level as the beginning of the detected + # cluster. + # Our implementation corrects a mistake in the original + # paper, i.e., in Definition 11 4c, r(x) < r(sD) should be + # r(x) > r(sD). + while reachability_plot[c_end - 1] > D_max and c_end > U_start: + c_end -= 1 + + # predecessor correction + if predecessor_correction: + c_start, c_end = _correct_predecessor( + reachability_plot, predecessor_plot, ordering, c_start, c_end + ) + if c_start is None: + continue + + # Definition 11: criterion 3.a + if c_end - c_start + 1 < min_cluster_size: + continue + + # Definition 11: criterion 1 + if c_start > D["end"]: + continue + + # Definition 11: criterion 2 + if c_end < U_start: + continue + + U_clusters.append((c_start, c_end)) + + # add smaller clusters first. + U_clusters.reverse() + clusters.extend(U_clusters) + + return np.array(clusters) + + +def _extract_xi_labels(ordering, clusters): + """Extracts the labels from the clusters returned by `_xi_cluster`. + We rely on the fact that clusters are stored + with the smaller clusters coming before the larger ones. 
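That labelling rule can be traced by hand on the doctest output of ``cluster_optics_xi`` above; a standalone sketch (restating the logic of this helper, not a new algorithm):

    import numpy as np

    ordering = np.array([0, 1, 2, 5, 3, 4])
    clusters = np.array([[0, 2], [3, 5], [0, 5]])  # smaller (leaf) clusters first

    labels = np.full(len(ordering), -1, dtype=int)
    label = 0
    for start, end in clusters:
        # only a run that is still entirely unlabelled (a leaf cluster) gets a label
        if not np.any(labels[start:end + 1] != -1):
            labels[start:end + 1] = label
            label += 1
    labels[ordering] = labels.copy()  # map ordering positions back to sample indices
    # labels is now array([0, 0, 0, 1, 1, 1])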
+ + Parameters + ---------- + ordering : array-like of shape (n_samples,) + The ordering of points calculated by OPTICS + + clusters : array-like of shape (n_clusters, 2) + List of clusters i.e. (start, end) tuples, + as returned by `_xi_cluster`. + + Returns + ------- + labels : ndarray of shape (n_samples,) + """ + + labels = np.full(len(ordering), -1, dtype=int) + label = 0 + for c in clusters: + if not np.any(labels[c[0] : (c[1] + 1)] != -1): + labels[c[0] : (c[1] + 1)] = label + label += 1 + labels[ordering] = labels.copy() + return labels diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_spectral.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..d925a2ff56bc4e260633f2076358617dacb1c4c2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/_spectral.py @@ -0,0 +1,799 @@ +"""Algorithms for spectral clustering""" + +# Author: Gael Varoquaux +# Brian Cheung +# Wei LI +# Andrew Knyazev +# License: BSD 3 clause + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy.linalg import LinAlgError, qr, svd +from scipy.sparse import csc_matrix + +from ..base import BaseEstimator, ClusterMixin, _fit_context +from ..manifold import spectral_embedding +from ..metrics.pairwise import KERNEL_PARAMS, pairwise_kernels +from ..neighbors import NearestNeighbors, kneighbors_graph +from ..utils import as_float_array, check_random_state +from ..utils._param_validation import Interval, StrOptions, validate_params +from ._kmeans import k_means + + +def cluster_qr(vectors): + """Find the discrete partition closest to the eigenvector embedding. + + This implementation was proposed in [1]_. + + .. versionadded:: 1.1 + + Parameters + ---------- + vectors : array-like, shape: (n_samples, n_clusters) + The embedding space of the samples. + + Returns + ------- + labels : array of integers, shape: n_samples + The cluster labels of vectors. + + References + ---------- + .. [1] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019 + Anil Damle, Victor Minden, Lexing Ying + <10.1093/imaiai/iay008>` + + """ + + k = vectors.shape[1] + _, _, piv = qr(vectors.T, pivoting=True) + ut, _, v = svd(vectors[piv[:k], :].T) + vectors = abs(np.dot(vectors, np.dot(ut, v.conj()))) + return vectors.argmax(axis=1) + + +def discretize( + vectors, *, copy=True, max_svd_restarts=30, n_iter_max=20, random_state=None +): + """Search for a partition matrix which is closest to the eigenvector embedding. + + This implementation was proposed in [1]_. + + Parameters + ---------- + vectors : array-like of shape (n_samples, n_clusters) + The embedding space of the samples. + + copy : bool, default=True + Whether to copy vectors, or perform in-place normalization. + + max_svd_restarts : int, default=30 + Maximum number of attempts to restart SVD if convergence fails + + n_iter_max : int, default=30 + Maximum number of iterations to attempt in rotation and partition + matrix search if machine precision convergence is not reached + + random_state : int, RandomState instance, default=None + Determines random number generation for rotation matrix initialization. + Use an int to make the randomness deterministic. + See :term:`Glossary `. + + Returns + ------- + labels : array of integers, shape: n_samples + The labels of the clusters. + + References + ---------- + + .. [1] `Multiclass spectral clustering, 2003 + Stella X. 
Yu, Jianbo Shi + `_ + + Notes + ----- + + The eigenvector embedding is used to iteratively search for the + closest discrete partition. First, the eigenvector embedding is + normalized to the space of partition matrices. An optimal discrete + partition matrix closest to this normalized embedding multiplied by + an initial rotation is calculated. Fixing this discrete partition + matrix, an optimal rotation matrix is calculated. These two + calculations are performed until convergence. The discrete partition + matrix is returned as the clustering solution. Used in spectral + clustering, this method tends to be faster and more robust to random + initialization than k-means. + + """ + + random_state = check_random_state(random_state) + + vectors = as_float_array(vectors, copy=copy) + + eps = np.finfo(float).eps + n_samples, n_components = vectors.shape + + # Normalize the eigenvectors to an equal length of a vector of ones. + # Reorient the eigenvectors to point in the negative direction with respect + # to the first element. This may have to do with constraining the + # eigenvectors to lie in a specific quadrant to make the discretization + # search easier. + norm_ones = np.sqrt(n_samples) + for i in range(vectors.shape[1]): + vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) * norm_ones + if vectors[0, i] != 0: + vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i]) + + # Normalize the rows of the eigenvectors. Samples should lie on the unit + # hypersphere centered at the origin. This transforms the samples in the + # embedding space to the space of partition matrices. + vectors = vectors / np.sqrt((vectors**2).sum(axis=1))[:, np.newaxis] + + svd_restarts = 0 + has_converged = False + + # If there is an exception we try to randomize and rerun SVD again + # do this max_svd_restarts times. 
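+    # The search below alternates two steps until the normalized-cut objective
+    # stops improving (or n_iter_max is exceeded): (1) with the rotation fixed,
+    # take the per-row argmax of the rotated eigenvectors as the discrete
+    # partition; (2) with the partition fixed, recover the best rotation from an
+    # SVD of the partition/eigenvector product. If that SVD fails to converge,
+    # the outer loop redraws a random initialization, up to max_svd_restarts times.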
+ while (svd_restarts < max_svd_restarts) and not has_converged: + # Initialize first column of rotation matrix with a row of the + # eigenvectors + rotation = np.zeros((n_components, n_components)) + rotation[:, 0] = vectors[random_state.randint(n_samples), :].T + + # To initialize the rest of the rotation matrix, find the rows + # of the eigenvectors that are as orthogonal to each other as + # possible + c = np.zeros(n_samples) + for j in range(1, n_components): + # Accumulate c to ensure row is as orthogonal as possible to + # previous picks as well as current one + c += np.abs(np.dot(vectors, rotation[:, j - 1])) + rotation[:, j] = vectors[c.argmin(), :].T + + last_objective_value = 0.0 + n_iter = 0 + + while not has_converged: + n_iter += 1 + + t_discrete = np.dot(vectors, rotation) + + labels = t_discrete.argmax(axis=1) + vectors_discrete = csc_matrix( + (np.ones(len(labels)), (np.arange(0, n_samples), labels)), + shape=(n_samples, n_components), + ) + + t_svd = vectors_discrete.T * vectors + + try: + U, S, Vh = np.linalg.svd(t_svd) + except LinAlgError: + svd_restarts += 1 + print("SVD did not converge, randomizing and trying again") + break + + ncut_value = 2.0 * (n_samples - S.sum()) + if (abs(ncut_value - last_objective_value) < eps) or (n_iter > n_iter_max): + has_converged = True + else: + # otherwise calculate rotation and continue + last_objective_value = ncut_value + rotation = np.dot(Vh.T, U.T) + + if not has_converged: + raise LinAlgError("SVD did not converge") + return labels + + +@validate_params( + {"affinity": ["array-like", "sparse matrix"]}, + prefer_skip_nested_validation=False, +) +def spectral_clustering( + affinity, + *, + n_clusters=8, + n_components=None, + eigen_solver=None, + random_state=None, + n_init=10, + eigen_tol="auto", + assign_labels="kmeans", + verbose=False, +): + """Apply clustering to a projection of the normalized Laplacian. + + In practice Spectral Clustering is very useful when the structure of + the individual clusters is highly non-convex or more generally when + a measure of the center and spread of the cluster is not a suitable + description of the complete cluster. For instance, when clusters are + nested circles on the 2D plane. + + If affinity is the adjacency matrix of a graph, this method can be + used to find normalized graph cuts [1]_, [2]_. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + affinity : {array-like, sparse matrix} of shape (n_samples, n_samples) + The affinity matrix describing the relationship of the samples to + embed. **Must be symmetric**. + + Possible examples: + - adjacency matrix of a graph, + - heat kernel of the pairwise distance matrix of the samples, + - symmetric k-nearest neighbours connectivity matrix of the samples. + + n_clusters : int, default=None + Number of clusters to extract. + + n_components : int, default=n_clusters + Number of eigenvectors to use for the spectral embedding. + + eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'} + The eigenvalue decomposition method. If None then ``'arpack'`` is used. + See [4]_ for more details regarding ``'lobpcg'``. + Eigensolver ``'amg'`` runs ``'lobpcg'`` with optional + Algebraic MultiGrid preconditioning and requires pyamg to be installed. + It can be faster on very large sparse problems [6]_ and [7]_. + + random_state : int, RandomState instance, default=None + A pseudo random number generator used for the initialization + of the lobpcg eigenvectors decomposition when `eigen_solver == + 'amg'`, and for the K-Means initialization. 
Use an int to make + the results deterministic across calls (See + :term:`Glossary `). + + .. note:: + When using `eigen_solver == 'amg'`, + it is necessary to also fix the global numpy seed with + `np.random.seed(int)` to get deterministic results. See + https://github.com/pyamg/pyamg/issues/139 for further + information. + + n_init : int, default=10 + Number of time the k-means algorithm will be run with different + centroid seeds. The final results will be the best output of n_init + consecutive runs in terms of inertia. Only used if + ``assign_labels='kmeans'``. + + eigen_tol : float, default="auto" + Stopping criterion for eigendecomposition of the Laplacian matrix. + If `eigen_tol="auto"` then the passed tolerance will depend on the + `eigen_solver`: + + - If `eigen_solver="arpack"`, then `eigen_tol=0.0`; + - If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then + `eigen_tol=None` which configures the underlying `lobpcg` solver to + automatically resolve the value according to their heuristics. See, + :func:`scipy.sparse.linalg.lobpcg` for details. + + Note that when using `eigen_solver="lobpcg"` or `eigen_solver="amg"` + values of `tol<1e-5` may lead to convergence issues and should be + avoided. + + .. versionadded:: 1.2 + Added 'auto' option. + + assign_labels : {'kmeans', 'discretize', 'cluster_qr'}, default='kmeans' + The strategy to use to assign labels in the embedding + space. There are three ways to assign labels after the Laplacian + embedding. k-means can be applied and is a popular choice. But it can + also be sensitive to initialization. Discretization is another + approach which is less sensitive to random initialization [3]_. + The cluster_qr method [5]_ directly extracts clusters from eigenvectors + in spectral clustering. In contrast to k-means and discretization, cluster_qr + has no tuning parameters and is not an iterative method, yet may outperform + k-means and discretization in terms of both quality and speed. + + .. versionchanged:: 1.1 + Added new labeling method 'cluster_qr'. + + verbose : bool, default=False + Verbosity mode. + + .. versionadded:: 0.24 + + Returns + ------- + labels : array of integers, shape: n_samples + The labels of the clusters. + + Notes + ----- + The graph should contain only one connected component, elsewhere + the results make little sense. + + This algorithm solves the normalized cut for `k=2`: it is a + normalized spectral clustering. + + References + ---------- + + .. [1] :doi:`Normalized cuts and image segmentation, 2000 + Jianbo Shi, Jitendra Malik + <10.1109/34.868688>` + + .. [2] :doi:`A Tutorial on Spectral Clustering, 2007 + Ulrike von Luxburg + <10.1007/s11222-007-9033-z>` + + .. [3] `Multiclass spectral clustering, 2003 + Stella X. Yu, Jianbo Shi + `_ + + .. [4] :doi:`Toward the Optimal Preconditioned Eigensolver: + Locally Optimal Block Preconditioned Conjugate Gradient Method, 2001 + A. V. Knyazev + SIAM Journal on Scientific Computing 23, no. 2, pp. 517-541. + <10.1137/S1064827500366124>` + + .. [5] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019 + Anil Damle, Victor Minden, Lexing Ying + <10.1093/imaiai/iay008>` + + .. [6] :doi:`Multiscale Spectral Image Segmentation Multiscale preconditioning + for computing eigenvalues of graph Laplacians in image segmentation, 2006 + Andrew Knyazev + <10.13140/RG.2.2.35280.02565>` + + .. [7] :doi:`Preconditioned spectral clustering for stochastic block partition + streaming graph challenge (Preliminary version at arXiv.) 
+ David Zhuzhunashvili, Andrew Knyazev + <10.1109/HPEC.2017.8091045>` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics.pairwise import pairwise_kernels + >>> from sklearn.cluster import spectral_clustering + >>> X = np.array([[1, 1], [2, 1], [1, 0], + ... [4, 7], [3, 5], [3, 6]]) + >>> affinity = pairwise_kernels(X, metric='rbf') + >>> spectral_clustering( + ... affinity=affinity, n_clusters=2, assign_labels="discretize", random_state=0 + ... ) + array([1, 1, 1, 0, 0, 0]) + """ + + clusterer = SpectralClustering( + n_clusters=n_clusters, + n_components=n_components, + eigen_solver=eigen_solver, + random_state=random_state, + n_init=n_init, + affinity="precomputed", + eigen_tol=eigen_tol, + assign_labels=assign_labels, + verbose=verbose, + ).fit(affinity) + + return clusterer.labels_ + + +class SpectralClustering(ClusterMixin, BaseEstimator): + """Apply clustering to a projection of the normalized Laplacian. + + In practice Spectral Clustering is very useful when the structure of + the individual clusters is highly non-convex, or more generally when + a measure of the center and spread of the cluster is not a suitable + description of the complete cluster, such as when clusters are + nested circles on the 2D plane. + + If the affinity matrix is the adjacency matrix of a graph, this method + can be used to find normalized graph cuts [1]_, [2]_. + + When calling ``fit``, an affinity matrix is constructed using either + a kernel function such the Gaussian (aka RBF) kernel with Euclidean + distance ``d(X, X)``:: + + np.exp(-gamma * d(X,X) ** 2) + + or a k-nearest neighbors connectivity matrix. + + Alternatively, a user-provided affinity matrix can be specified by + setting ``affinity='precomputed'``. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_clusters : int, default=8 + The dimension of the projection subspace. + + eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None + The eigenvalue decomposition strategy to use. AMG requires pyamg + to be installed. It can be faster on very large, sparse problems, + but may also lead to instabilities. If None, then ``'arpack'`` is + used. See [4]_ for more details regarding `'lobpcg'`. + + n_components : int, default=None + Number of eigenvectors to use for the spectral embedding. If None, + defaults to `n_clusters`. + + random_state : int, RandomState instance, default=None + A pseudo random number generator used for the initialization + of the lobpcg eigenvectors decomposition when `eigen_solver == + 'amg'`, and for the K-Means initialization. Use an int to make + the results deterministic across calls (See + :term:`Glossary `). + + .. note:: + When using `eigen_solver == 'amg'`, + it is necessary to also fix the global numpy seed with + `np.random.seed(int)` to get deterministic results. See + https://github.com/pyamg/pyamg/issues/139 for further + information. + + n_init : int, default=10 + Number of time the k-means algorithm will be run with different + centroid seeds. The final results will be the best output of n_init + consecutive runs in terms of inertia. Only used if + ``assign_labels='kmeans'``. + + gamma : float, default=1.0 + Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels. + Ignored for ``affinity='nearest_neighbors'``. + + affinity : str or callable, default='rbf' + How to construct the affinity matrix. + - 'nearest_neighbors': construct the affinity matrix by computing a + graph of nearest neighbors. 
+ - 'rbf': construct the affinity matrix using a radial basis function + (RBF) kernel. + - 'precomputed': interpret ``X`` as a precomputed affinity matrix, + where larger values indicate greater similarity between instances. + - 'precomputed_nearest_neighbors': interpret ``X`` as a sparse graph + of precomputed distances, and construct a binary affinity matrix + from the ``n_neighbors`` nearest neighbors of each instance. + - one of the kernels supported by + :func:`~sklearn.metrics.pairwise.pairwise_kernels`. + + Only kernels that produce similarity scores (non-negative values that + increase with similarity) should be used. This property is not checked + by the clustering algorithm. + + n_neighbors : int, default=10 + Number of neighbors to use when constructing the affinity matrix using + the nearest neighbors method. Ignored for ``affinity='rbf'``. + + eigen_tol : float, default="auto" + Stopping criterion for eigen decomposition of the Laplacian matrix. + If `eigen_tol="auto"` then the passed tolerance will depend on the + `eigen_solver`: + + - If `eigen_solver="arpack"`, then `eigen_tol=0.0`; + - If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then + `eigen_tol=None` which configures the underlying `lobpcg` solver to + automatically resolve the value according to their heuristics. See, + :func:`scipy.sparse.linalg.lobpcg` for details. + + Note that when using `eigen_solver="lobpcg"` or `eigen_solver="amg"` + values of `tol<1e-5` may lead to convergence issues and should be + avoided. + + .. versionadded:: 1.2 + Added 'auto' option. + + assign_labels : {'kmeans', 'discretize', 'cluster_qr'}, default='kmeans' + The strategy for assigning labels in the embedding space. There are two + ways to assign labels after the Laplacian embedding. k-means is a + popular choice, but it can be sensitive to initialization. + Discretization is another approach which is less sensitive to random + initialization [3]_. + The cluster_qr method [5]_ directly extract clusters from eigenvectors + in spectral clustering. In contrast to k-means and discretization, cluster_qr + has no tuning parameters and runs no iterations, yet may outperform + k-means and discretization in terms of both quality and speed. + + .. versionchanged:: 1.1 + Added new labeling method 'cluster_qr'. + + degree : float, default=3 + Degree of the polynomial kernel. Ignored by other kernels. + + coef0 : float, default=1 + Zero coefficient for polynomial and sigmoid kernels. + Ignored by other kernels. + + kernel_params : dict of str to any, default=None + Parameters (keyword arguments) and values for kernel passed as + callable object. Ignored by other kernels. + + n_jobs : int, default=None + The number of parallel jobs to run when `affinity='nearest_neighbors'` + or `affinity='precomputed_nearest_neighbors'`. The neighbors search + will be done in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : bool, default=False + Verbosity mode. + + .. versionadded:: 0.24 + + Attributes + ---------- + affinity_matrix_ : array-like of shape (n_samples, n_samples) + Affinity matrix used for clustering. Available only after calling + ``fit``. + + labels_ : ndarray of shape (n_samples,) + Labels of each point + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. 
Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.cluster.KMeans : K-Means clustering. + sklearn.cluster.DBSCAN : Density-Based Spatial Clustering of + Applications with Noise. + + Notes + ----- + A distance matrix for which 0 indicates identical elements and high values + indicate very dissimilar elements can be transformed into an affinity / + similarity matrix that is well-suited for the algorithm by + applying the Gaussian (aka RBF, heat) kernel:: + + np.exp(- dist_matrix ** 2 / (2. * delta ** 2)) + + where ``delta`` is a free parameter representing the width of the Gaussian + kernel. + + An alternative is to take a symmetric version of the k-nearest neighbors + connectivity matrix of the points. + + If the pyamg package is installed, it is used: this greatly + speeds up computation. + + References + ---------- + .. [1] :doi:`Normalized cuts and image segmentation, 2000 + Jianbo Shi, Jitendra Malik + <10.1109/34.868688>` + + .. [2] :doi:`A Tutorial on Spectral Clustering, 2007 + Ulrike von Luxburg + <10.1007/s11222-007-9033-z>` + + .. [3] `Multiclass spectral clustering, 2003 + Stella X. Yu, Jianbo Shi + `_ + + .. [4] :doi:`Toward the Optimal Preconditioned Eigensolver: + Locally Optimal Block Preconditioned Conjugate Gradient Method, 2001 + A. V. Knyazev + SIAM Journal on Scientific Computing 23, no. 2, pp. 517-541. + <10.1137/S1064827500366124>` + + .. [5] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019 + Anil Damle, Victor Minden, Lexing Ying + <10.1093/imaiai/iay008>` + + Examples + -------- + >>> from sklearn.cluster import SpectralClustering + >>> import numpy as np + >>> X = np.array([[1, 1], [2, 1], [1, 0], + ... [4, 7], [3, 5], [3, 6]]) + >>> clustering = SpectralClustering(n_clusters=2, + ... assign_labels='discretize', + ... 
random_state=0).fit(X) + >>> clustering.labels_ + array([1, 1, 1, 0, 0, 0]) + >>> clustering + SpectralClustering(assign_labels='discretize', n_clusters=2, + random_state=0) + """ + + _parameter_constraints: dict = { + "n_clusters": [Interval(Integral, 1, None, closed="left")], + "eigen_solver": [StrOptions({"arpack", "lobpcg", "amg"}), None], + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "random_state": ["random_state"], + "n_init": [Interval(Integral, 1, None, closed="left")], + "gamma": [Interval(Real, 0, None, closed="left")], + "affinity": [ + callable, + StrOptions( + set(KERNEL_PARAMS) + | {"nearest_neighbors", "precomputed", "precomputed_nearest_neighbors"} + ), + ], + "n_neighbors": [Interval(Integral, 1, None, closed="left")], + "eigen_tol": [ + Interval(Real, 0.0, None, closed="left"), + StrOptions({"auto"}), + ], + "assign_labels": [StrOptions({"kmeans", "discretize", "cluster_qr"})], + "degree": [Interval(Real, 0, None, closed="left")], + "coef0": [Interval(Real, None, None, closed="neither")], + "kernel_params": [dict, None], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + } + + def __init__( + self, + n_clusters=8, + *, + eigen_solver=None, + n_components=None, + random_state=None, + n_init=10, + gamma=1.0, + affinity="rbf", + n_neighbors=10, + eigen_tol="auto", + assign_labels="kmeans", + degree=3, + coef0=1, + kernel_params=None, + n_jobs=None, + verbose=False, + ): + self.n_clusters = n_clusters + self.eigen_solver = eigen_solver + self.n_components = n_components + self.random_state = random_state + self.n_init = n_init + self.gamma = gamma + self.affinity = affinity + self.n_neighbors = n_neighbors + self.eigen_tol = eigen_tol + self.assign_labels = assign_labels + self.degree = degree + self.coef0 = coef0 + self.kernel_params = kernel_params + self.n_jobs = n_jobs + self.verbose = verbose + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Perform spectral clustering from features, or affinity matrix. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) + Training instances to cluster, similarities / affinities between + instances if ``affinity='precomputed'``, or distances between + instances if ``affinity='precomputed_nearest_neighbors``. If a + sparse matrix is provided in a format other than ``csr_matrix``, + ``csc_matrix``, or ``coo_matrix``, it will be converted into a + sparse ``csr_matrix``. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + A fitted instance of the estimator. + """ + X = self._validate_data( + X, + accept_sparse=["csr", "csc", "coo"], + dtype=np.float64, + ensure_min_samples=2, + ) + allow_squared = self.affinity in [ + "precomputed", + "precomputed_nearest_neighbors", + ] + if X.shape[0] == X.shape[1] and not allow_squared: + warnings.warn( + "The spectral clustering API has changed. ``fit``" + "now constructs an affinity matrix from data. To use" + " a custom affinity matrix, " + "set ``affinity=precomputed``." 
+ ) + + if self.affinity == "nearest_neighbors": + connectivity = kneighbors_graph( + X, n_neighbors=self.n_neighbors, include_self=True, n_jobs=self.n_jobs + ) + self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T) + elif self.affinity == "precomputed_nearest_neighbors": + estimator = NearestNeighbors( + n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed" + ).fit(X) + connectivity = estimator.kneighbors_graph(X=X, mode="connectivity") + self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T) + elif self.affinity == "precomputed": + self.affinity_matrix_ = X + else: + params = self.kernel_params + if params is None: + params = {} + if not callable(self.affinity): + params["gamma"] = self.gamma + params["degree"] = self.degree + params["coef0"] = self.coef0 + self.affinity_matrix_ = pairwise_kernels( + X, metric=self.affinity, filter_params=True, **params + ) + + random_state = check_random_state(self.random_state) + n_components = ( + self.n_clusters if self.n_components is None else self.n_components + ) + # We now obtain the real valued solution matrix to the + # relaxed Ncut problem, solving the eigenvalue problem + # L_sym x = lambda x and recovering u = D^-1/2 x. + # The first eigenvector is constant only for fully connected graphs + # and should be kept for spectral clustering (drop_first = False) + # See spectral_embedding documentation. + maps = spectral_embedding( + self.affinity_matrix_, + n_components=n_components, + eigen_solver=self.eigen_solver, + random_state=random_state, + eigen_tol=self.eigen_tol, + drop_first=False, + ) + if self.verbose: + print(f"Computing label assignment using {self.assign_labels}") + + if self.assign_labels == "kmeans": + _, self.labels_, _ = k_means( + maps, + self.n_clusters, + random_state=random_state, + n_init=self.n_init, + verbose=self.verbose, + ) + elif self.assign_labels == "cluster_qr": + self.labels_ = cluster_qr(maps) + else: + self.labels_ = discretize(maps, random_state=random_state) + + return self + + def fit_predict(self, X, y=None): + """Perform spectral clustering on `X` and return cluster labels. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) + Training instances to cluster, similarities / affinities between + instances if ``affinity='precomputed'``, or distances between + instances if ``affinity='precomputed_nearest_neighbors``. If a + sparse matrix is provided in a format other than ``csr_matrix``, + ``csc_matrix``, or ``coo_matrix``, it will be converted into a + sparse ``csr_matrix``. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Cluster labels. 
+ """ + return super().fit_predict(X, y) + + def _more_tags(self): + return { + "pairwise": self.affinity in [ + "precomputed", + "precomputed_nearest_neighbors", + ] + } diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd4d4a9167325aba2846e84b88481c359b9fb6f1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..452ba30959eb91b3463eb49a289d605ca440e591 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_affinity_propagation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_affinity_propagation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54bb2a848b2bb4bd0cd5bf31b7f8fbc41e8cfb46 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_affinity_propagation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..587256ad39b576071d6e7503ca3784b6f59b986a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_birch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_birch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90f6cb46c59b4b0c79f2e02805ff38002ce2fa51 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_birch.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bisect_k_means.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bisect_k_means.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29f3bf5e34d30ca489ba95c3a83debd13c0ef056 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bisect_k_means.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_dbscan.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_dbscan.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..f248d487e2a27d36bc714f33c899125fbe1d826a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_dbscan.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_feature_agglomeration.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_feature_agglomeration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6cd54a34db9b850421c67748a924fc6c482ef10 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_feature_agglomeration.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hdbscan.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hdbscan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0eb14fab8eaff672cee49722fe18ccb83df9df4a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hdbscan.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hierarchical.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hierarchical.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..decbaf4be12c21d4348f43be246291d3be812428 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hierarchical.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_k_means.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_k_means.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf44a2c48cfcd6db2f8485611754401cd88b8773 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_k_means.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_mean_shift.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_mean_shift.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91117764757b8ef6a6623745615e477127847a94 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_mean_shift.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_optics.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_optics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f45dea38cf1030d2d91412b699ca0b93b9391f7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_optics.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_spectral.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_spectral.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12136e3eb8faa62098b4d4c4527c1ff355f2b154 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_spectral.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/common.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/common.py new file mode 100644 index 0000000000000000000000000000000000000000..b1fe047fe230af1c3fbb2ec0b72f3ef20e5aa3aa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/common.py @@ -0,0 +1,37 @@ +""" +Common utilities for testing clustering. + +""" + +import numpy as np + +############################################################################### +# Generate sample data + + +def generate_clustered_data( + seed=0, n_clusters=3, n_features=2, n_samples_per_cluster=20, std=0.4 +): + prng = np.random.RandomState(seed) + + # the data is voluntary shifted away from zero to check clustering + # algorithm robustness with regards to non centered data + means = ( + np.array( + [ + [1, 1, 1, 0], + [-1, -1, 0, 1], + [1, -1, 1, 1], + [-1, 1, 1, 0], + ] + ) + + 10 + ) + + X = np.empty((0, n_features)) + for i in range(n_clusters): + X = np.r_[ + X, + means[i][:n_features] + std * prng.randn(n_samples_per_cluster, n_features), + ] + return X diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_affinity_propagation.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_affinity_propagation.py new file mode 100644 index 0000000000000000000000000000000000000000..c3138e59111ed849988dd0e6d3433a4bb251e2a1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_affinity_propagation.py @@ -0,0 +1,321 @@ +""" +Testing for Clustering methods + +""" + +import warnings + +import numpy as np +import pytest + +from sklearn.cluster import AffinityPropagation, affinity_propagation +from sklearn.cluster._affinity_propagation import _equal_similarities_and_preferences +from sklearn.datasets import make_blobs +from sklearn.exceptions import ConvergenceWarning, NotFittedError +from sklearn.metrics import euclidean_distances +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.fixes import CSR_CONTAINERS + +n_clusters = 3 +centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10 +X, _ = make_blobs( + n_samples=60, + n_features=2, + centers=centers, + cluster_std=0.4, + shuffle=True, + random_state=0, +) + +# TODO: AffinityPropagation must preserve dtype for its fitted attributes +# and test must be created accordingly to this new behavior. +# For more details, see: https://github.com/scikit-learn/scikit-learn/issues/11000 + + +def test_affinity_propagation(global_random_seed, global_dtype): + """Test consistency of the affinity propagations.""" + S = -euclidean_distances(X.astype(global_dtype, copy=False), squared=True) + preference = np.median(S) * 10 + cluster_centers_indices, labels = affinity_propagation( + S, preference=preference, random_state=global_random_seed + ) + + n_clusters_ = len(cluster_centers_indices) + + assert n_clusters == n_clusters_ + + +def test_affinity_propagation_precomputed(): + """Check equality of precomputed affinity matrix to internally computed affinity + matrix. 
+ """ + S = -euclidean_distances(X, squared=True) + preference = np.median(S) * 10 + af = AffinityPropagation( + preference=preference, affinity="precomputed", random_state=28 + ) + labels_precomputed = af.fit(S).labels_ + + af = AffinityPropagation(preference=preference, verbose=True, random_state=37) + labels = af.fit(X).labels_ + + assert_array_equal(labels, labels_precomputed) + + cluster_centers_indices = af.cluster_centers_indices_ + + n_clusters_ = len(cluster_centers_indices) + assert np.unique(labels).size == n_clusters_ + assert n_clusters == n_clusters_ + + +def test_affinity_propagation_no_copy(): + """Check behaviour of not copying the input data.""" + S = -euclidean_distances(X, squared=True) + S_original = S.copy() + preference = np.median(S) * 10 + assert not np.allclose(S.diagonal(), preference) + + # with copy=True S should not be modified + affinity_propagation(S, preference=preference, copy=True, random_state=0) + assert_allclose(S, S_original) + assert not np.allclose(S.diagonal(), preference) + assert_allclose(S.diagonal(), np.zeros(S.shape[0])) + + # with copy=False S will be modified inplace + affinity_propagation(S, preference=preference, copy=False, random_state=0) + assert_allclose(S.diagonal(), preference) + + # test that copy=True and copy=False lead to the same result + S = S_original.copy() + af = AffinityPropagation(preference=preference, verbose=True, random_state=0) + + labels = af.fit(X).labels_ + _, labels_no_copy = affinity_propagation( + S, preference=preference, copy=False, random_state=74 + ) + assert_array_equal(labels, labels_no_copy) + + +def test_affinity_propagation_affinity_shape(): + """Check the shape of the affinity matrix when using `affinity_propagation.""" + S = -euclidean_distances(X, squared=True) + err_msg = "The matrix of similarities must be a square array" + with pytest.raises(ValueError, match=err_msg): + affinity_propagation(S[:, :-1]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_affinity_propagation_precomputed_with_sparse_input(csr_container): + err_msg = "Sparse data was passed for X, but dense data is required" + with pytest.raises(TypeError, match=err_msg): + AffinityPropagation(affinity="precomputed").fit(csr_container((3, 3))) + + +def test_affinity_propagation_predict(global_random_seed, global_dtype): + # Test AffinityPropagation.predict + af = AffinityPropagation(affinity="euclidean", random_state=global_random_seed) + X_ = X.astype(global_dtype, copy=False) + labels = af.fit_predict(X_) + labels2 = af.predict(X_) + assert_array_equal(labels, labels2) + + +def test_affinity_propagation_predict_error(): + # Test exception in AffinityPropagation.predict + # Not fitted. + af = AffinityPropagation(affinity="euclidean") + with pytest.raises(NotFittedError): + af.predict(X) + + # Predict not supported when affinity="precomputed". 
+ S = np.dot(X, X.T) + af = AffinityPropagation(affinity="precomputed", random_state=57) + af.fit(S) + with pytest.raises(ValueError, match="expecting 60 features as input"): + af.predict(X) + + +def test_affinity_propagation_fit_non_convergence(global_dtype): + # In case of non-convergence of affinity_propagation(), the cluster + # centers should be an empty array and training samples should be labelled + # as noise (-1) + X = np.array([[0, 0], [1, 1], [-2, -2]], dtype=global_dtype) + + # Force non-convergence by allowing only a single iteration + af = AffinityPropagation(preference=-10, max_iter=1, random_state=82) + + with pytest.warns(ConvergenceWarning): + af.fit(X) + assert_allclose(np.empty((0, 2)), af.cluster_centers_) + assert_array_equal(np.array([-1, -1, -1]), af.labels_) + + +def test_affinity_propagation_equal_mutual_similarities(global_dtype): + X = np.array([[-1, 1], [1, -1]], dtype=global_dtype) + S = -euclidean_distances(X, squared=True) + + # setting preference > similarity + with pytest.warns(UserWarning, match="mutually equal"): + cluster_center_indices, labels = affinity_propagation(S, preference=0) + + # expect every sample to become an exemplar + assert_array_equal([0, 1], cluster_center_indices) + assert_array_equal([0, 1], labels) + + # setting preference < similarity + with pytest.warns(UserWarning, match="mutually equal"): + cluster_center_indices, labels = affinity_propagation(S, preference=-10) + + # expect one cluster, with arbitrary (first) sample as exemplar + assert_array_equal([0], cluster_center_indices) + assert_array_equal([0, 0], labels) + + # setting different preferences + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + cluster_center_indices, labels = affinity_propagation( + S, preference=[-20, -10], random_state=37 + ) + + # expect one cluster, with highest-preference sample as exemplar + assert_array_equal([1], cluster_center_indices) + assert_array_equal([0, 0], labels) + + +def test_affinity_propagation_predict_non_convergence(global_dtype): + # In case of non-convergence of affinity_propagation(), the cluster + # centers should be an empty array + X = np.array([[0, 0], [1, 1], [-2, -2]], dtype=global_dtype) + + # Force non-convergence by allowing only a single iteration + with pytest.warns(ConvergenceWarning): + af = AffinityPropagation(preference=-10, max_iter=1, random_state=75).fit(X) + + # At prediction time, consider new samples as noise since there are no + # clusters + to_predict = np.array([[2, 2], [3, 3], [4, 4]]) + with pytest.warns(ConvergenceWarning): + y = af.predict(to_predict) + assert_array_equal(np.array([-1, -1, -1]), y) + + +def test_affinity_propagation_non_convergence_regressiontest(global_dtype): + X = np.array( + [[1, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 1]], dtype=global_dtype + ) + af = AffinityPropagation(affinity="euclidean", max_iter=2, random_state=34) + msg = ( + "Affinity propagation did not converge, this model may return degenerate" + " cluster centers and labels." 
+ ) + with pytest.warns(ConvergenceWarning, match=msg): + af.fit(X) + + assert_array_equal(np.array([0, 0, 0]), af.labels_) + + +def test_equal_similarities_and_preferences(global_dtype): + # Unequal distances + X = np.array([[0, 0], [1, 1], [-2, -2]], dtype=global_dtype) + S = -euclidean_distances(X, squared=True) + + assert not _equal_similarities_and_preferences(S, np.array(0)) + assert not _equal_similarities_and_preferences(S, np.array([0, 0])) + assert not _equal_similarities_and_preferences(S, np.array([0, 1])) + + # Equal distances + X = np.array([[0, 0], [1, 1]], dtype=global_dtype) + S = -euclidean_distances(X, squared=True) + + # Different preferences + assert not _equal_similarities_and_preferences(S, np.array([0, 1])) + + # Same preferences + assert _equal_similarities_and_preferences(S, np.array([0, 0])) + assert _equal_similarities_and_preferences(S, np.array(0)) + + +def test_affinity_propagation_random_state(): + """Check that different random states lead to different initialisations + by looking at the center locations after two iterations. + """ + centers = [[1, 1], [-1, -1], [1, -1]] + X, labels_true = make_blobs( + n_samples=300, centers=centers, cluster_std=0.5, random_state=0 + ) + # random_state = 0 + ap = AffinityPropagation(convergence_iter=1, max_iter=2, random_state=0) + ap.fit(X) + centers0 = ap.cluster_centers_ + + # random_state = 76 + ap = AffinityPropagation(convergence_iter=1, max_iter=2, random_state=76) + ap.fit(X) + centers76 = ap.cluster_centers_ + # check that the centers have not yet converged to the same solution + assert np.mean((centers0 - centers76) ** 2) > 1 + + +@pytest.mark.parametrize("container", CSR_CONTAINERS + [np.array]) +def test_affinity_propagation_convergence_warning_dense_sparse(container, global_dtype): + """ + Check that having sparse or dense `centers` format should not + influence the convergence. + Non-regression test for gh-13334. 
+ """ + centers = container(np.zeros((1, 10))) + rng = np.random.RandomState(42) + X = rng.rand(40, 10).astype(global_dtype, copy=False) + y = (4 * rng.rand(40)).astype(int) + ap = AffinityPropagation(random_state=46) + ap.fit(X, y) + ap.cluster_centers_ = centers + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + assert_array_equal(ap.predict(X), np.zeros(X.shape[0], dtype=int)) + + +# FIXME; this test is broken with different random states, needs to be revisited +def test_correct_clusters(global_dtype): + # Test to fix incorrect clusters due to dtype change + # (non-regression test for issue #10832) + X = np.array( + [[1, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 1]], dtype=global_dtype + ) + afp = AffinityPropagation(preference=1, affinity="precomputed", random_state=0).fit( + X + ) + expected = np.array([0, 1, 1, 2]) + assert_array_equal(afp.labels_, expected) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_input_for_predict(csr_container): + # Test to make sure sparse inputs are accepted for predict + # (non-regression test for issue #20049) + af = AffinityPropagation(affinity="euclidean", random_state=42) + af.fit(X) + labels = af.predict(csr_container((2, 2))) + assert_array_equal(labels, (2, 2)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_input_for_fit_predict(csr_container): + # Test to make sure sparse inputs are accepted for fit_predict + # (non-regression test for issue #20049) + af = AffinityPropagation(affinity="euclidean", random_state=42) + rng = np.random.RandomState(42) + X = csr_container(rng.randint(0, 2, size=(5, 5))) + labels = af.fit_predict(X) + assert_array_equal(labels, (0, 1, 1, 2, 3)) + + +def test_affinity_propagation_equal_points(): + """Make sure we do not assign multiple clusters to equal points. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/pull/20043 + """ + X = np.zeros((8, 1)) + af = AffinityPropagation(affinity="euclidean", damping=0.5, random_state=42).fit(X) + assert np.all(af.labels_ == 0) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_bicluster.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_bicluster.py new file mode 100644 index 0000000000000000000000000000000000000000..ebc845a7bf262c60cf9f039e5ce021d841bdf4d4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_bicluster.py @@ -0,0 +1,264 @@ +"""Testing for Spectral Biclustering methods""" + +import numpy as np +import pytest +from scipy.sparse import issparse + +from sklearn.base import BaseEstimator, BiclusterMixin +from sklearn.cluster import SpectralBiclustering, SpectralCoclustering +from sklearn.cluster._bicluster import ( + _bistochastic_normalize, + _log_normalize, + _scale_normalize, +) +from sklearn.datasets import make_biclusters, make_checkerboard +from sklearn.metrics import consensus_score, v_measure_score +from sklearn.model_selection import ParameterGrid +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +class MockBiclustering(BiclusterMixin, BaseEstimator): + # Mock object for testing get_submatrix. + def __init__(self): + pass + + def get_indices(self, i): + # Overridden to reproduce old get_submatrix test. 
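+ # For any bicluster index this mock reports rows {0, 1, 4} and columns {2, 3}, which is what test_get_submatrix asserts against below.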
+ return ( + np.where([True, True, False, False, True])[0], + np.where([False, False, True, True])[0], + ) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_get_submatrix(csr_container): + data = np.arange(20).reshape(5, 4) + model = MockBiclustering() + + for X in (data, csr_container(data), data.tolist()): + submatrix = model.get_submatrix(0, X) + if issparse(submatrix): + submatrix = submatrix.toarray() + assert_array_equal(submatrix, [[2, 3], [6, 7], [18, 19]]) + submatrix[:] = -1 + if issparse(X): + X = X.toarray() + assert np.all(X != -1) + + +def _test_shape_indices(model): + # Test get_shape and get_indices on fitted model. + for i in range(model.n_clusters): + m, n = model.get_shape(i) + i_ind, j_ind = model.get_indices(i) + assert len(i_ind) == m + assert len(j_ind) == n + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_spectral_coclustering(global_random_seed, csr_container): + # Test Dhillon's Spectral CoClustering on a simple problem. + param_grid = { + "svd_method": ["randomized", "arpack"], + "n_svd_vecs": [None, 20], + "mini_batch": [False, True], + "init": ["k-means++"], + "n_init": [10], + } + S, rows, cols = make_biclusters( + (30, 30), 3, noise=0.1, random_state=global_random_seed + ) + S -= S.min() # needs to be nonnegative before making it sparse + S = np.where(S < 1, 0, S) # threshold some values + for mat in (S, csr_container(S)): + for kwargs in ParameterGrid(param_grid): + model = SpectralCoclustering( + n_clusters=3, random_state=global_random_seed, **kwargs + ) + model.fit(mat) + + assert model.rows_.shape == (3, 30) + assert_array_equal(model.rows_.sum(axis=0), np.ones(30)) + assert_array_equal(model.columns_.sum(axis=0), np.ones(30)) + assert consensus_score(model.biclusters_, (rows, cols)) == 1 + + _test_shape_indices(model) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_spectral_biclustering(global_random_seed, csr_container): + # Test Kluger methods on a checkerboard dataset. 
+ S, rows, cols = make_checkerboard( + (30, 30), 3, noise=0.5, random_state=global_random_seed + ) + + non_default_params = { + "method": ["scale", "log"], + "svd_method": ["arpack"], + "n_svd_vecs": [20], + "mini_batch": [True], + } + + for mat in (S, csr_container(S)): + for param_name, param_values in non_default_params.items(): + for param_value in param_values: + model = SpectralBiclustering( + n_clusters=3, + n_init=3, + init="k-means++", + random_state=global_random_seed, + ) + model.set_params(**dict([(param_name, param_value)])) + + if issparse(mat) and model.get_params().get("method") == "log": + # cannot take log of sparse matrix + with pytest.raises(ValueError): + model.fit(mat) + continue + else: + model.fit(mat) + + assert model.rows_.shape == (9, 30) + assert model.columns_.shape == (9, 30) + assert_array_equal(model.rows_.sum(axis=0), np.repeat(3, 30)) + assert_array_equal(model.columns_.sum(axis=0), np.repeat(3, 30)) + assert consensus_score(model.biclusters_, (rows, cols)) == 1 + + _test_shape_indices(model) + + +def _do_scale_test(scaled): + """Check that rows sum to one constant, and columns to another.""" + row_sum = scaled.sum(axis=1) + col_sum = scaled.sum(axis=0) + if issparse(scaled): + row_sum = np.asarray(row_sum).squeeze() + col_sum = np.asarray(col_sum).squeeze() + assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100), decimal=1) + assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100), decimal=1) + + +def _do_bistochastic_test(scaled): + """Check that rows and columns sum to the same constant.""" + _do_scale_test(scaled) + assert_almost_equal(scaled.sum(axis=0).mean(), scaled.sum(axis=1).mean(), decimal=1) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_scale_normalize(global_random_seed, csr_container): + generator = np.random.RandomState(global_random_seed) + X = generator.rand(100, 100) + for mat in (X, csr_container(X)): + scaled, _, _ = _scale_normalize(mat) + _do_scale_test(scaled) + if issparse(mat): + assert issparse(scaled) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_bistochastic_normalize(global_random_seed, csr_container): + generator = np.random.RandomState(global_random_seed) + X = generator.rand(100, 100) + for mat in (X, csr_container(X)): + scaled = _bistochastic_normalize(mat) + _do_bistochastic_test(scaled) + if issparse(mat): + assert issparse(scaled) + + +def test_log_normalize(global_random_seed): + # adding any constant to a log-scaled matrix should make it + # bistochastic + generator = np.random.RandomState(global_random_seed) + mat = generator.rand(100, 100) + scaled = _log_normalize(mat) + 1 + _do_bistochastic_test(scaled) + + +def test_fit_best_piecewise(global_random_seed): + model = SpectralBiclustering(random_state=global_random_seed) + vectors = np.array([[0, 0, 0, 1, 1, 1], [2, 2, 2, 3, 3, 3], [0, 1, 2, 3, 4, 5]]) + best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2) + assert_array_equal(best, vectors[:2]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_project_and_cluster(global_random_seed, csr_container): + model = SpectralBiclustering(random_state=global_random_seed) + data = np.array([[1, 1, 1], [1, 1, 1], [3, 6, 3], [3, 6, 3]]) + vectors = np.array([[1, 0], [0, 1], [0, 0]]) + for mat in (data, csr_container(data)): + labels = model._project_and_cluster(mat, vectors, n_clusters=2) + assert_almost_equal(v_measure_score(labels, [0, 0, 1, 1]), 1.0) + + +def test_perfect_checkerboard(global_random_seed): + # XXX 
Previously failed on build bot (not reproducible) + model = SpectralBiclustering( + 3, svd_method="arpack", random_state=global_random_seed + ) + + S, rows, cols = make_checkerboard( + (30, 30), 3, noise=0, random_state=global_random_seed + ) + model.fit(S) + assert consensus_score(model.biclusters_, (rows, cols)) == 1 + + S, rows, cols = make_checkerboard( + (40, 30), 3, noise=0, random_state=global_random_seed + ) + model.fit(S) + assert consensus_score(model.biclusters_, (rows, cols)) == 1 + + S, rows, cols = make_checkerboard( + (30, 40), 3, noise=0, random_state=global_random_seed + ) + model.fit(S) + assert consensus_score(model.biclusters_, (rows, cols)) == 1 + + +@pytest.mark.parametrize( + "params, type_err, err_msg", + [ + ( + {"n_clusters": 6}, + ValueError, + "n_clusters should be <= n_samples=5", + ), + ( + {"n_clusters": (3, 3, 3)}, + ValueError, + "Incorrect parameter n_clusters", + ), + ( + {"n_clusters": (3, 6)}, + ValueError, + "Incorrect parameter n_clusters", + ), + ( + {"n_components": 3, "n_best": 4}, + ValueError, + "n_best=4 must be <= n_components=3", + ), + ], +) +def test_spectralbiclustering_parameter_validation(params, type_err, err_msg): + """Check parameters validation in `SpectralBiClustering`""" + data = np.arange(25).reshape((5, 5)) + model = SpectralBiclustering(**params) + with pytest.raises(type_err, match=err_msg): + model.fit(data) + + +@pytest.mark.parametrize("est", (SpectralBiclustering(), SpectralCoclustering())) +def test_n_features_in_(est): + X, _, _ = make_biclusters((3, 3), 3, random_state=0) + + assert not hasattr(est, "n_features_in_") + est.fit(X) + assert est.n_features_in_ == 3 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_birch.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_birch.py new file mode 100644 index 0000000000000000000000000000000000000000..fc1c702d1f462b877ea70dcaa43667bdf446b589 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_birch.py @@ -0,0 +1,242 @@ +""" +Tests for the birch clustering algorithm. 
+""" + +import numpy as np +import pytest + +from sklearn.cluster import AgglomerativeClustering, Birch +from sklearn.cluster.tests.common import generate_clustered_data +from sklearn.datasets import make_blobs +from sklearn.exceptions import ConvergenceWarning +from sklearn.metrics import pairwise_distances_argmin, v_measure_score +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.fixes import CSR_CONTAINERS + + +def test_n_samples_leaves_roots(global_random_seed, global_dtype): + # Sanity check for the number of samples in leaves and roots + X, y = make_blobs(n_samples=10, random_state=global_random_seed) + X = X.astype(global_dtype, copy=False) + brc = Birch() + brc.fit(X) + n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_]) + n_samples_leaves = sum( + [sc.n_samples_ for leaf in brc._get_leaves() for sc in leaf.subclusters_] + ) + assert n_samples_leaves == X.shape[0] + assert n_samples_root == X.shape[0] + + +def test_partial_fit(global_random_seed, global_dtype): + # Test that fit is equivalent to calling partial_fit multiple times + X, y = make_blobs(n_samples=100, random_state=global_random_seed) + X = X.astype(global_dtype, copy=False) + brc = Birch(n_clusters=3) + brc.fit(X) + brc_partial = Birch(n_clusters=None) + brc_partial.partial_fit(X[:50]) + brc_partial.partial_fit(X[50:]) + assert_allclose(brc_partial.subcluster_centers_, brc.subcluster_centers_) + + # Test that same global labels are obtained after calling partial_fit + # with None + brc_partial.set_params(n_clusters=3) + brc_partial.partial_fit(None) + assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_) + + +def test_birch_predict(global_random_seed, global_dtype): + # Test the predict method predicts the nearest centroid. + rng = np.random.RandomState(global_random_seed) + X = generate_clustered_data(n_clusters=3, n_features=3, n_samples_per_cluster=10) + X = X.astype(global_dtype, copy=False) + + # n_samples * n_samples_per_cluster + shuffle_indices = np.arange(30) + rng.shuffle(shuffle_indices) + X_shuffle = X[shuffle_indices, :] + brc = Birch(n_clusters=4, threshold=1.0) + brc.fit(X_shuffle) + + # Birch must preserve inputs' dtype + assert brc.subcluster_centers_.dtype == global_dtype + + assert_array_equal(brc.labels_, brc.predict(X_shuffle)) + centroids = brc.subcluster_centers_ + nearest_centroid = brc.subcluster_labels_[ + pairwise_distances_argmin(X_shuffle, centroids) + ] + assert_allclose(v_measure_score(nearest_centroid, brc.labels_), 1.0) + + +def test_n_clusters(global_random_seed, global_dtype): + # Test that n_clusters param works properly + X, y = make_blobs(n_samples=100, centers=10, random_state=global_random_seed) + X = X.astype(global_dtype, copy=False) + brc1 = Birch(n_clusters=10) + brc1.fit(X) + assert len(brc1.subcluster_centers_) > 10 + assert len(np.unique(brc1.labels_)) == 10 + + # Test that n_clusters = Agglomerative Clustering gives + # the same results. + gc = AgglomerativeClustering(n_clusters=10) + brc2 = Birch(n_clusters=gc) + brc2.fit(X) + assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_) + assert_array_equal(brc1.labels_, brc2.labels_) + + # Test that a small number of clusters raises a warning. 
+ brc4 = Birch(threshold=10000.0) + with pytest.warns(ConvergenceWarning): + brc4.fit(X) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_X(global_random_seed, global_dtype, csr_container): + # Test that sparse and dense data give same results + X, y = make_blobs(n_samples=100, centers=10, random_state=global_random_seed) + X = X.astype(global_dtype, copy=False) + brc = Birch(n_clusters=10) + brc.fit(X) + + csr = csr_container(X) + brc_sparse = Birch(n_clusters=10) + brc_sparse.fit(csr) + + # Birch must preserve inputs' dtype + assert brc_sparse.subcluster_centers_.dtype == global_dtype + + assert_array_equal(brc.labels_, brc_sparse.labels_) + assert_allclose(brc.subcluster_centers_, brc_sparse.subcluster_centers_) + + +def test_partial_fit_second_call_error_checks(): + # second partial fit calls will error when n_features is not consistent + # with the first call + X, y = make_blobs(n_samples=100) + brc = Birch(n_clusters=3) + brc.partial_fit(X, y) + + msg = "X has 1 features, but Birch is expecting 2 features" + with pytest.raises(ValueError, match=msg): + brc.partial_fit(X[:, [0]], y) + + +def check_branching_factor(node, branching_factor): + subclusters = node.subclusters_ + assert branching_factor >= len(subclusters) + for cluster in subclusters: + if cluster.child_: + check_branching_factor(cluster.child_, branching_factor) + + +def test_branching_factor(global_random_seed, global_dtype): + # Test that nodes have at max branching_factor number of subclusters + X, y = make_blobs(random_state=global_random_seed) + X = X.astype(global_dtype, copy=False) + branching_factor = 9 + + # Purposefully set a low threshold to maximize the subclusters. + brc = Birch(n_clusters=None, branching_factor=branching_factor, threshold=0.01) + brc.fit(X) + check_branching_factor(brc.root_, branching_factor) + brc = Birch(n_clusters=3, branching_factor=branching_factor, threshold=0.01) + brc.fit(X) + check_branching_factor(brc.root_, branching_factor) + + +def check_threshold(birch_instance, threshold): + """Use the leaf linked list for traversal""" + current_leaf = birch_instance.dummy_leaf_.next_leaf_ + while current_leaf: + subclusters = current_leaf.subclusters_ + for sc in subclusters: + assert threshold >= sc.radius + current_leaf = current_leaf.next_leaf_ + + +def test_threshold(global_random_seed, global_dtype): + # Test that the leaf subclusters have a threshold lesser than radius + X, y = make_blobs(n_samples=80, centers=4, random_state=global_random_seed) + X = X.astype(global_dtype, copy=False) + brc = Birch(threshold=0.5, n_clusters=None) + brc.fit(X) + check_threshold(brc, 0.5) + + brc = Birch(threshold=5.0, n_clusters=None) + brc.fit(X) + check_threshold(brc, 5.0) + + +def test_birch_n_clusters_long_int(): + # Check that birch supports n_clusters with np.int64 dtype, for instance + # coming from np.arange. 
#16484 + X, _ = make_blobs(random_state=0) + n_clusters = np.int64(5) + Birch(n_clusters=n_clusters).fit(X) + + +def test_feature_names_out(): + """Check `get_feature_names_out` for `Birch`.""" + X, _ = make_blobs(n_samples=80, n_features=4, random_state=0) + brc = Birch(n_clusters=4) + brc.fit(X) + n_clusters = brc.subcluster_centers_.shape[0] + + names_out = brc.get_feature_names_out() + assert_array_equal([f"birch{i}" for i in range(n_clusters)], names_out) + + +def test_transform_match_across_dtypes(global_random_seed): + X, _ = make_blobs(n_samples=80, n_features=4, random_state=global_random_seed) + brc = Birch(n_clusters=4, threshold=1.1) + Y_64 = brc.fit_transform(X) + Y_32 = brc.fit_transform(X.astype(np.float32)) + + assert_allclose(Y_64, Y_32, atol=1e-6) + + +def test_subcluster_dtype(global_dtype): + X = make_blobs(n_samples=80, n_features=4, random_state=0)[0].astype( + global_dtype, copy=False + ) + brc = Birch(n_clusters=4) + assert brc.fit(X).subcluster_centers_.dtype == global_dtype + + +def test_both_subclusters_updated(): + """Check that both subclusters are updated when a node a split, even when there are + duplicated data points. Non-regression test for #23269. + """ + + X = np.array( + [ + [-2.6192791, -1.5053215], + [-2.9993038, -1.6863596], + [-2.3724914, -1.3438171], + [-2.336792, -1.3417323], + [-2.4089134, -1.3290224], + [-2.3724914, -1.3438171], + [-3.364009, -1.8846745], + [-2.3724914, -1.3438171], + [-2.617677, -1.5003285], + [-2.2960556, -1.3260119], + [-2.3724914, -1.3438171], + [-2.5459878, -1.4533926], + [-2.25979, -1.3003055], + [-2.4089134, -1.3290224], + [-2.3724914, -1.3438171], + [-2.4089134, -1.3290224], + [-2.5459878, -1.4533926], + [-2.3724914, -1.3438171], + [-2.9720619, -1.7058647], + [-2.336792, -1.3417323], + [-2.3724914, -1.3438171], + ], + dtype=np.float32, + ) + + # no error + Birch(branching_factor=5, threshold=1e-5, n_clusters=None).fit(X) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_bisect_k_means.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_bisect_k_means.py new file mode 100644 index 0000000000000000000000000000000000000000..799ddbc086ce0a14397fe5cb4aef607903c01228 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_bisect_k_means.py @@ -0,0 +1,158 @@ +import numpy as np +import pytest + +from sklearn.cluster import BisectingKMeans +from sklearn.metrics import v_measure_score +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.fixes import CSR_CONTAINERS + + +@pytest.mark.parametrize("bisecting_strategy", ["biggest_inertia", "largest_cluster"]) +@pytest.mark.parametrize("init", ["k-means++", "random"]) +def test_three_clusters(bisecting_strategy, init): + """Tries to perform bisect k-means for three clusters to check + if splitting data is performed correctly. 
+ """ + X = np.array( + [[1, 1], [10, 1], [3, 1], [10, 0], [2, 1], [10, 2], [10, 8], [10, 9], [10, 10]] + ) + bisect_means = BisectingKMeans( + n_clusters=3, + random_state=0, + bisecting_strategy=bisecting_strategy, + init=init, + ) + bisect_means.fit(X) + + expected_centers = [[2, 1], [10, 1], [10, 9]] + expected_labels = [0, 1, 0, 1, 0, 1, 2, 2, 2] + + assert_allclose( + sorted(expected_centers), sorted(bisect_means.cluster_centers_.tolist()) + ) + assert_allclose(v_measure_score(expected_labels, bisect_means.labels_), 1.0) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse(csr_container): + """Test Bisecting K-Means with sparse data. + + Checks if labels and centers are the same between dense and sparse. + """ + + rng = np.random.RandomState(0) + + X = rng.rand(20, 2) + X[X < 0.8] = 0 + X_csr = csr_container(X) + + bisect_means = BisectingKMeans(n_clusters=3, random_state=0) + + bisect_means.fit(X_csr) + sparse_centers = bisect_means.cluster_centers_ + + bisect_means.fit(X) + normal_centers = bisect_means.cluster_centers_ + + # Check if results is the same for dense and sparse data + assert_allclose(normal_centers, sparse_centers, atol=1e-8) + + +@pytest.mark.parametrize("n_clusters", [4, 5]) +def test_n_clusters(n_clusters): + """Test if resulting labels are in range [0, n_clusters - 1].""" + + rng = np.random.RandomState(0) + X = rng.rand(10, 2) + + bisect_means = BisectingKMeans(n_clusters=n_clusters, random_state=0) + bisect_means.fit(X) + + assert_array_equal(np.unique(bisect_means.labels_), np.arange(n_clusters)) + + +def test_one_cluster(): + """Test single cluster.""" + + X = np.array([[1, 2], [10, 2], [10, 8]]) + + bisect_means = BisectingKMeans(n_clusters=1, random_state=0).fit(X) + + # All labels from fit or predict should be equal 0 + assert all(bisect_means.labels_ == 0) + assert all(bisect_means.predict(X) == 0) + + assert_allclose(bisect_means.cluster_centers_, X.mean(axis=0).reshape(1, -1)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None]) +def test_fit_predict(csr_container): + """Check if labels from fit(X) method are same as from fit(X).predict(X).""" + rng = np.random.RandomState(0) + + X = rng.rand(10, 2) + + if csr_container is not None: + X[X < 0.8] = 0 + X = csr_container(X) + + bisect_means = BisectingKMeans(n_clusters=3, random_state=0) + bisect_means.fit(X) + + assert_array_equal(bisect_means.labels_, bisect_means.predict(X)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None]) +def test_dtype_preserved(csr_container, global_dtype): + """Check that centers dtype is the same as input data dtype.""" + rng = np.random.RandomState(0) + X = rng.rand(10, 2).astype(global_dtype, copy=False) + + if csr_container is not None: + X[X < 0.8] = 0 + X = csr_container(X) + + km = BisectingKMeans(n_clusters=3, random_state=0) + km.fit(X) + + assert km.cluster_centers_.dtype == global_dtype + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None]) +def test_float32_float64_equivalence(csr_container): + """Check that the results are the same between float32 and float64.""" + rng = np.random.RandomState(0) + X = rng.rand(10, 2) + + if csr_container is not None: + X[X < 0.8] = 0 + X = csr_container(X) + + km64 = BisectingKMeans(n_clusters=3, random_state=0).fit(X) + km32 = BisectingKMeans(n_clusters=3, random_state=0).fit(X.astype(np.float32)) + + assert_allclose(km32.cluster_centers_, km64.cluster_centers_) + assert_array_equal(km32.labels_, km64.labels_) + + 
+@pytest.mark.parametrize("algorithm", ("lloyd", "elkan")) +def test_no_crash_on_empty_bisections(algorithm): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/27081 + rng = np.random.RandomState(0) + X_train = rng.rand(3000, 10) + bkm = BisectingKMeans(n_clusters=10, algorithm=algorithm).fit(X_train) + + # predict on scaled data to trigger pathologic case + # where the inner mask leads to empty bisections. + X_test = 50 * rng.rand(100, 10) + labels = bkm.predict(X_test) # should not crash with idiv by 0 + assert np.isin(np.unique(labels), np.arange(10)).all() + + +def test_one_feature(): + # Check that no error is raised when there is only one feature + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/27236 + X = np.random.normal(size=(128, 1)) + BisectingKMeans(bisecting_strategy="biggest_inertia", random_state=0).fit(X) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_dbscan.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_dbscan.py new file mode 100644 index 0000000000000000000000000000000000000000..d42cc2b17d518515c31f8420e96db7f2ef05b4d2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_dbscan.py @@ -0,0 +1,434 @@ +""" +Tests for DBSCAN clustering algorithm +""" + +import pickle +import warnings + +import numpy as np +import pytest +from scipy.spatial import distance + +from sklearn.cluster import DBSCAN, dbscan +from sklearn.cluster.tests.common import generate_clustered_data +from sklearn.metrics.pairwise import pairwise_distances +from sklearn.neighbors import NearestNeighbors +from sklearn.utils._testing import assert_array_equal +from sklearn.utils.fixes import CSR_CONTAINERS, LIL_CONTAINERS + +n_clusters = 3 +X = generate_clustered_data(n_clusters=n_clusters) + + +def test_dbscan_similarity(): + # Tests the DBSCAN algorithm with a similarity array. + # Parameters chosen specifically for this task. + eps = 0.15 + min_samples = 10 + # Compute similarities + D = distance.squareform(distance.pdist(X)) + D /= np.max(D) + # Compute DBSCAN + core_samples, labels = dbscan( + D, metric="precomputed", eps=eps, min_samples=min_samples + ) + # number of clusters, ignoring noise if present + n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0) + + assert n_clusters_1 == n_clusters + + db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples) + labels = db.fit(D).labels_ + + n_clusters_2 = len(set(labels)) - int(-1 in labels) + assert n_clusters_2 == n_clusters + + +def test_dbscan_feature(): + # Tests the DBSCAN algorithm with a feature vector array. + # Parameters chosen specifically for this task. + # Different eps to other test, because distance is not normalised. 
+ eps = 0.8 + min_samples = 10 + metric = "euclidean" + # Compute DBSCAN + # parameters chosen for task + core_samples, labels = dbscan(X, metric=metric, eps=eps, min_samples=min_samples) + + # number of clusters, ignoring noise if present + n_clusters_1 = len(set(labels)) - int(-1 in labels) + assert n_clusters_1 == n_clusters + + db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples) + labels = db.fit(X).labels_ + + n_clusters_2 = len(set(labels)) - int(-1 in labels) + assert n_clusters_2 == n_clusters + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_dbscan_sparse(lil_container): + core_sparse, labels_sparse = dbscan(lil_container(X), eps=0.8, min_samples=10) + core_dense, labels_dense = dbscan(X, eps=0.8, min_samples=10) + assert_array_equal(core_dense, core_sparse) + assert_array_equal(labels_dense, labels_sparse) + + +@pytest.mark.parametrize("include_self", [False, True]) +def test_dbscan_sparse_precomputed(include_self): + D = pairwise_distances(X) + nn = NearestNeighbors(radius=0.9).fit(X) + X_ = X if include_self else None + D_sparse = nn.radius_neighbors_graph(X=X_, mode="distance") + # Ensure it is sparse not merely on diagonals: + assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1) + core_sparse, labels_sparse = dbscan( + D_sparse, eps=0.8, min_samples=10, metric="precomputed" + ) + core_dense, labels_dense = dbscan(D, eps=0.8, min_samples=10, metric="precomputed") + assert_array_equal(core_dense, core_sparse) + assert_array_equal(labels_dense, labels_sparse) + + +def test_dbscan_sparse_precomputed_different_eps(): + # test that precomputed neighbors graph is filtered if computed with + # a radius larger than DBSCAN's eps. + lower_eps = 0.2 + nn = NearestNeighbors(radius=lower_eps).fit(X) + D_sparse = nn.radius_neighbors_graph(X, mode="distance") + dbscan_lower = dbscan(D_sparse, eps=lower_eps, metric="precomputed") + + higher_eps = lower_eps + 0.7 + nn = NearestNeighbors(radius=higher_eps).fit(X) + D_sparse = nn.radius_neighbors_graph(X, mode="distance") + dbscan_higher = dbscan(D_sparse, eps=lower_eps, metric="precomputed") + + assert_array_equal(dbscan_lower[0], dbscan_higher[0]) + assert_array_equal(dbscan_lower[1], dbscan_higher[1]) + + +@pytest.mark.parametrize("metric", ["precomputed", "minkowski"]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None]) +def test_dbscan_input_not_modified(metric, csr_container): + # test that the input is not modified by dbscan + X = np.random.RandomState(0).rand(10, 10) + X = csr_container(X) if csr_container is not None else X + X_copy = X.copy() + dbscan(X, metric=metric) + + if csr_container is not None: + assert_array_equal(X.toarray(), X_copy.toarray()) + else: + assert_array_equal(X, X_copy) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_dbscan_input_not_modified_precomputed_sparse_nodiag(csr_container): + """Check that we don't modify in-place the pre-computed sparse matrix. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27508 + """ + X = np.random.RandomState(0).rand(10, 10) + # Add zeros on the diagonal that will be implicit when creating + # the sparse matrix. If `X` is modified in-place, the zeros from + # the diagonal will be made explicit. + np.fill_diagonal(X, 0) + X = csr_container(X) + assert all(row != col for row, col in zip(*X.nonzero())) + X_copy = X.copy() + dbscan(X, metric="precomputed") + # Make sure that we did not modify `X` in-place even by creating + # explicit 0s values. 
+ assert X.nnz == X_copy.nnz + assert_array_equal(X.toarray(), X_copy.toarray()) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_dbscan_no_core_samples(csr_container): + rng = np.random.RandomState(0) + X = rng.rand(40, 10) + X[X < 0.8] = 0 + + for X_ in [X, csr_container(X)]: + db = DBSCAN(min_samples=6).fit(X_) + assert_array_equal(db.components_, np.empty((0, X_.shape[1]))) + assert_array_equal(db.labels_, -1) + assert db.core_sample_indices_.shape == (0,) + + +def test_dbscan_callable(): + # Tests the DBSCAN algorithm with a callable metric. + # Parameters chosen specifically for this task. + # Different eps to other test, because distance is not normalised. + eps = 0.8 + min_samples = 10 + # metric is the function reference, not the string key. + metric = distance.euclidean + # Compute DBSCAN + # parameters chosen for task + core_samples, labels = dbscan( + X, metric=metric, eps=eps, min_samples=min_samples, algorithm="ball_tree" + ) + + # number of clusters, ignoring noise if present + n_clusters_1 = len(set(labels)) - int(-1 in labels) + assert n_clusters_1 == n_clusters + + db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples, algorithm="ball_tree") + labels = db.fit(X).labels_ + + n_clusters_2 = len(set(labels)) - int(-1 in labels) + assert n_clusters_2 == n_clusters + + +def test_dbscan_metric_params(): + # Tests that DBSCAN works with the metrics_params argument. + eps = 0.8 + min_samples = 10 + p = 1 + + # Compute DBSCAN with metric_params arg + + with warnings.catch_warnings(record=True) as warns: + db = DBSCAN( + metric="minkowski", + metric_params={"p": p}, + eps=eps, + p=None, + min_samples=min_samples, + algorithm="ball_tree", + ).fit(X) + assert not warns, warns[0].message + core_sample_1, labels_1 = db.core_sample_indices_, db.labels_ + + # Test that sample labels are the same as passing Minkowski 'p' directly + db = DBSCAN( + metric="minkowski", eps=eps, min_samples=min_samples, algorithm="ball_tree", p=p + ).fit(X) + core_sample_2, labels_2 = db.core_sample_indices_, db.labels_ + + assert_array_equal(core_sample_1, core_sample_2) + assert_array_equal(labels_1, labels_2) + + # Minkowski with p=1 should be equivalent to Manhattan distance + db = DBSCAN( + metric="manhattan", eps=eps, min_samples=min_samples, algorithm="ball_tree" + ).fit(X) + core_sample_3, labels_3 = db.core_sample_indices_, db.labels_ + + assert_array_equal(core_sample_1, core_sample_3) + assert_array_equal(labels_1, labels_3) + + with pytest.warns( + SyntaxWarning, + match=( + "Parameter p is found in metric_params. " + "The corresponding parameter from __init__ " + "is ignored." + ), + ): + # Test that checks p is ignored in favor of metric_params={'p': } + db = DBSCAN( + metric="minkowski", + metric_params={"p": p}, + eps=eps, + p=p + 1, + min_samples=min_samples, + algorithm="ball_tree", + ).fit(X) + core_sample_4, labels_4 = db.core_sample_indices_, db.labels_ + + assert_array_equal(core_sample_1, core_sample_4) + assert_array_equal(labels_1, labels_4) + + +def test_dbscan_balltree(): + # Tests the DBSCAN algorithm with balltree for neighbor calculation. 
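+ # The precomputed-distance run below establishes the reference number of clusters; the ball_tree and kd_tree fits that follow must reproduce it.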
+ eps = 0.8 + min_samples = 10 + + D = pairwise_distances(X) + core_samples, labels = dbscan( + D, metric="precomputed", eps=eps, min_samples=min_samples + ) + + # number of clusters, ignoring noise if present + n_clusters_1 = len(set(labels)) - int(-1 in labels) + assert n_clusters_1 == n_clusters + + db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm="ball_tree") + labels = db.fit(X).labels_ + + n_clusters_2 = len(set(labels)) - int(-1 in labels) + assert n_clusters_2 == n_clusters + + db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm="kd_tree") + labels = db.fit(X).labels_ + + n_clusters_3 = len(set(labels)) - int(-1 in labels) + assert n_clusters_3 == n_clusters + + db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm="ball_tree") + labels = db.fit(X).labels_ + + n_clusters_4 = len(set(labels)) - int(-1 in labels) + assert n_clusters_4 == n_clusters + + db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples, algorithm="ball_tree") + labels = db.fit(X).labels_ + + n_clusters_5 = len(set(labels)) - int(-1 in labels) + assert n_clusters_5 == n_clusters + + +def test_input_validation(): + # DBSCAN.fit should accept a list of lists. + X = [[1.0, 2.0], [3.0, 4.0]] + DBSCAN().fit(X) # must not raise exception + + +def test_pickle(): + obj = DBSCAN() + s = pickle.dumps(obj) + assert type(pickle.loads(s)) == obj.__class__ + + +def test_boundaries(): + # ensure min_samples is inclusive of core point + core, _ = dbscan([[0], [1]], eps=2, min_samples=2) + assert 0 in core + # ensure eps is inclusive of circumference + core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2) + assert 0 in core + core, _ = dbscan([[0], [1], [1]], eps=0.99, min_samples=2) + assert 0 not in core + + +def test_weighted_dbscan(global_random_seed): + # ensure sample_weight is validated + with pytest.raises(ValueError): + dbscan([[0], [1]], sample_weight=[2]) + with pytest.raises(ValueError): + dbscan([[0], [1]], sample_weight=[2, 3, 4]) + + # ensure sample_weight has an effect + assert_array_equal([], dbscan([[0], [1]], sample_weight=None, min_samples=6)[0]) + assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5], min_samples=6)[0]) + assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5], min_samples=6)[0]) + assert_array_equal( + [0, 1], dbscan([[0], [1]], sample_weight=[6, 6], min_samples=6)[0] + ) + + # points within eps of each other: + assert_array_equal( + [0, 1], dbscan([[0], [1]], eps=1.5, sample_weight=[5, 1], min_samples=6)[0] + ) + # and effect of non-positive and non-integer sample_weight: + assert_array_equal( + [], dbscan([[0], [1]], sample_weight=[5, 0], eps=1.5, min_samples=6)[0] + ) + assert_array_equal( + [0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1], eps=1.5, min_samples=6)[0] + ) + assert_array_equal( + [0, 1], dbscan([[0], [1]], sample_weight=[6, 0], eps=1.5, min_samples=6)[0] + ) + assert_array_equal( + [], dbscan([[0], [1]], sample_weight=[6, -1], eps=1.5, min_samples=6)[0] + ) + + # for non-negative sample_weight, cores should be identical to repetition + rng = np.random.RandomState(global_random_seed) + sample_weight = rng.randint(0, 5, X.shape[0]) + core1, label1 = dbscan(X, sample_weight=sample_weight) + assert len(label1) == len(X) + + X_repeated = np.repeat(X, sample_weight, axis=0) + core_repeated, label_repeated = dbscan(X_repeated) + core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool) + core_repeated_mask[core_repeated] = True + core_mask = np.zeros(X.shape[0], dtype=bool) + core_mask[core1] = True + 
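+ # A sample with integer weight w must behave like w duplicated unit-weight samples, so expanding the per-sample core mask by the weights has to match the core mask computed on the repeated data.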
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask) + + # sample_weight should work with precomputed distance matrix + D = pairwise_distances(X) + core3, label3 = dbscan(D, sample_weight=sample_weight, metric="precomputed") + assert_array_equal(core1, core3) + assert_array_equal(label1, label3) + + # sample_weight should work with estimator + est = DBSCAN().fit(X, sample_weight=sample_weight) + core4 = est.core_sample_indices_ + label4 = est.labels_ + assert_array_equal(core1, core4) + assert_array_equal(label1, label4) + + est = DBSCAN() + label5 = est.fit_predict(X, sample_weight=sample_weight) + core5 = est.core_sample_indices_ + assert_array_equal(core1, core5) + assert_array_equal(label1, label5) + assert_array_equal(label1, est.labels_) + + +@pytest.mark.parametrize("algorithm", ["brute", "kd_tree", "ball_tree"]) +def test_dbscan_core_samples_toy(algorithm): + X = [[0], [2], [3], [4], [6], [8], [10]] + n_samples = len(X) + + # Degenerate case: every sample is a core sample, either with its own + # cluster or including other close core samples. + core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=1) + assert_array_equal(core_samples, np.arange(n_samples)) + assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4]) + + # With eps=1 and min_samples=2 only the 3 samples from the denser area + # are core samples. All other points are isolated and considered noise. + core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=2) + assert_array_equal(core_samples, [1, 2, 3]) + assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1]) + + # Only the sample in the middle of the dense area is core. Its two + # neighbors are edge samples. Remaining samples are noise. + core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=3) + assert_array_equal(core_samples, [2]) + assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1]) + + # It's no longer possible to extract core samples with eps=1: + # everything is noise. 
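Before the `min_samples=4` call that follows, it may help to spell out why everything turns into noise: with `eps=1`, even the densest point of this toy dataset has a neighborhood of only 3 samples (itself plus its two immediate neighbors). A small standalone check of that count (not part of the test file):

```python
import numpy as np

# Same toy 1-D dataset as in the surrounding test.
X = np.array([0, 2, 3, 4, 6, 8, 10])

# Count, for each point, how many samples fall within eps=1 (including itself).
neighborhood_sizes = [(np.abs(X - x) <= 1).sum() for x in X]

# The best any point can do is 3 (the point at 3, together with 2 and 4), so no
# point can be a core sample once min_samples is raised to 4.
assert max(neighborhood_sizes) == 3
```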
+ core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=4) + assert_array_equal(core_samples, []) + assert_array_equal(labels, np.full(n_samples, -1.0)) + + +def test_dbscan_precomputed_metric_with_degenerate_input_arrays(): + # see https://github.com/scikit-learn/scikit-learn/issues/4641 for + # more details + X = np.eye(10) + labels = DBSCAN(eps=0.5, metric="precomputed").fit(X).labels_ + assert len(set(labels)) == 1 + + X = np.zeros((10, 10)) + labels = DBSCAN(eps=0.5, metric="precomputed").fit(X).labels_ + assert len(set(labels)) == 1 + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_dbscan_precomputed_metric_with_initial_rows_zero(csr_container): + # sample matrix with initial two row all zero + ar = np.array( + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0], + [0.0, 0.0, 0.1, 0.1, 0.0, 0.0, 0.3], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1], + [0.0, 0.0, 0.0, 0.0, 0.3, 0.1, 0.0], + ] + ) + matrix = csr_container(ar) + labels = DBSCAN(eps=0.2, metric="precomputed", min_samples=2).fit(matrix).labels_ + assert_array_equal(labels, [-1, -1, 0, 0, 0, 1, 1]) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_feature_agglomeration.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_feature_agglomeration.py new file mode 100644 index 0000000000000000000000000000000000000000..121e8f2cfe400c1fb5e6608cc0e120b4d1a36a1d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_feature_agglomeration.py @@ -0,0 +1,80 @@ +""" +Tests for sklearn.cluster._feature_agglomeration +""" +# Authors: Sergul Aydore 2017 +import warnings + +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn.cluster import FeatureAgglomeration +from sklearn.datasets import make_blobs +from sklearn.utils._testing import assert_array_almost_equal + + +def test_feature_agglomeration(): + n_clusters = 1 + X = np.array([0, 0, 1]).reshape(1, 3) # (n_samples, n_features) + + agglo_mean = FeatureAgglomeration(n_clusters=n_clusters, pooling_func=np.mean) + agglo_median = FeatureAgglomeration(n_clusters=n_clusters, pooling_func=np.median) + agglo_mean.fit(X) + agglo_median.fit(X) + + assert np.size(np.unique(agglo_mean.labels_)) == n_clusters + assert np.size(np.unique(agglo_median.labels_)) == n_clusters + assert np.size(agglo_mean.labels_) == X.shape[1] + assert np.size(agglo_median.labels_) == X.shape[1] + + # Test transform + Xt_mean = agglo_mean.transform(X) + Xt_median = agglo_median.transform(X) + assert Xt_mean.shape[1] == n_clusters + assert Xt_median.shape[1] == n_clusters + assert Xt_mean == np.array([1 / 3.0]) + assert Xt_median == np.array([0.0]) + + # Test inverse transform + X_full_mean = agglo_mean.inverse_transform(Xt_mean) + X_full_median = agglo_median.inverse_transform(Xt_median) + assert np.unique(X_full_mean[0]).size == n_clusters + assert np.unique(X_full_median[0]).size == n_clusters + + assert_array_almost_equal(agglo_mean.transform(X_full_mean), Xt_mean) + assert_array_almost_equal(agglo_median.transform(X_full_median), Xt_median) + + +def test_feature_agglomeration_feature_names_out(): + """Check `get_feature_names_out` for `FeatureAgglomeration`.""" + X, _ = make_blobs(n_features=6, random_state=0) + agglo = FeatureAgglomeration(n_clusters=3) + agglo.fit(X) + n_clusters = agglo.n_clusters_ + + names_out = agglo.get_feature_names_out() + 
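The expected strings in the assertion that follows come from scikit-learn's default output-naming convention: the lower-cased estimator class name followed by the index of each aggregated feature. A standalone sketch of what that yields for three clusters (not part of the diff):

```python
from sklearn.cluster import FeatureAgglomeration
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_features=6, random_state=0)
agglo = FeatureAgglomeration(n_clusters=3).fit(X)

# Lower-cased class name + output index, one name per aggregated feature.
print(agglo.get_feature_names_out())
# -> ['featureagglomeration0' 'featureagglomeration1' 'featureagglomeration2']
```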
assert_array_equal( + [f"featureagglomeration{i}" for i in range(n_clusters)], names_out + ) + + +# TODO(1.5): remove this test +def test_inverse_transform_Xred_deprecation(): + X = np.array([0, 0, 1]).reshape(1, 3) # (n_samples, n_features) + + est = FeatureAgglomeration(n_clusters=1, pooling_func=np.mean) + est.fit(X) + Xt = est.transform(X) + + with pytest.raises(TypeError, match="Missing required positional argument"): + est.inverse_transform() + + with pytest.raises(ValueError, match="Please provide only"): + est.inverse_transform(Xt=Xt, Xred=Xt) + + with warnings.catch_warnings(record=True): + warnings.simplefilter("error") + est.inverse_transform(Xt) + + with pytest.warns(FutureWarning, match="Input argument `Xred` was renamed to `Xt`"): + est.inverse_transform(Xred=Xt) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_hdbscan.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_hdbscan.py new file mode 100644 index 0000000000000000000000000000000000000000..6db2d4387de181358d0ec64a98035cc1293cfa3e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_hdbscan.py @@ -0,0 +1,581 @@ +""" +Tests for HDBSCAN clustering algorithm +Based on the DBSCAN test code +""" +import numpy as np +import pytest +from scipy import stats +from scipy.spatial import distance + +from sklearn.cluster import HDBSCAN +from sklearn.cluster._hdbscan._tree import ( + CONDENSED_dtype, + _condense_tree, + _do_labelling, +) +from sklearn.cluster._hdbscan.hdbscan import _OUTLIER_ENCODING +from sklearn.datasets import make_blobs +from sklearn.metrics import fowlkes_mallows_score +from sklearn.metrics.pairwise import _VALID_METRICS, euclidean_distances +from sklearn.neighbors import BallTree, KDTree +from sklearn.preprocessing import StandardScaler +from sklearn.utils import shuffle +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + +X, y = make_blobs(n_samples=200, random_state=10) +X, y = shuffle(X, y, random_state=7) +X = StandardScaler().fit_transform(X) + +ALGORITHMS = [ + "kd_tree", + "ball_tree", + "brute", + "auto", +] + +OUTLIER_SET = {-1} | {out["label"] for _, out in _OUTLIER_ENCODING.items()} + + +def check_label_quality(labels, threshold=0.99): + n_clusters = len(set(labels) - OUTLIER_SET) + assert n_clusters == 3 + assert fowlkes_mallows_score(labels, y) > threshold + + +@pytest.mark.parametrize("outlier_type", _OUTLIER_ENCODING) +def test_outlier_data(outlier_type): + """ + Tests if np.inf and np.nan data are each treated as special outliers. 
+ """ + outlier = { + "infinite": np.inf, + "missing": np.nan, + }[outlier_type] + prob_check = { + "infinite": lambda x, y: x == y, + "missing": lambda x, y: np.isnan(x), + }[outlier_type] + label = _OUTLIER_ENCODING[outlier_type]["label"] + prob = _OUTLIER_ENCODING[outlier_type]["prob"] + + X_outlier = X.copy() + X_outlier[0] = [outlier, 1] + X_outlier[5] = [outlier, outlier] + model = HDBSCAN().fit(X_outlier) + + (missing_labels_idx,) = (model.labels_ == label).nonzero() + assert_array_equal(missing_labels_idx, [0, 5]) + + (missing_probs_idx,) = (prob_check(model.probabilities_, prob)).nonzero() + assert_array_equal(missing_probs_idx, [0, 5]) + + clean_indices = list(range(1, 5)) + list(range(6, 200)) + clean_model = HDBSCAN().fit(X_outlier[clean_indices]) + assert_array_equal(clean_model.labels_, model.labels_[clean_indices]) + + +def test_hdbscan_distance_matrix(): + """ + Tests that HDBSCAN works with precomputed distance matrices, and throws the + appropriate errors when needed. + """ + D = euclidean_distances(X) + D_original = D.copy() + labels = HDBSCAN(metric="precomputed", copy=True).fit_predict(D) + + assert_allclose(D, D_original) + check_label_quality(labels) + + msg = r"The precomputed distance matrix.*has shape" + with pytest.raises(ValueError, match=msg): + HDBSCAN(metric="precomputed", copy=True).fit_predict(X) + + msg = r"The precomputed distance matrix.*values" + # Ensure the matrix is not symmetric + D[0, 1] = 10 + D[1, 0] = 1 + with pytest.raises(ValueError, match=msg): + HDBSCAN(metric="precomputed").fit_predict(D) + + +@pytest.mark.parametrize("sparse_constructor", [*CSR_CONTAINERS, *CSC_CONTAINERS]) +def test_hdbscan_sparse_distance_matrix(sparse_constructor): + """ + Tests that HDBSCAN works with sparse distance matrices. + """ + D = distance.squareform(distance.pdist(X)) + D /= np.max(D) + + threshold = stats.scoreatpercentile(D.flatten(), 50) + + D[D >= threshold] = 0.0 + D = sparse_constructor(D) + D.eliminate_zeros() + + labels = HDBSCAN(metric="precomputed").fit_predict(D) + check_label_quality(labels) + + +def test_hdbscan_feature_array(): + """ + Tests that HDBSCAN works with feature array, including an arbitrary + goodness of fit check. Note that the check is a simple heuristic. + """ + labels = HDBSCAN().fit_predict(X) + + # Check that clustering is arbitrarily good + # This is a heuristic to guard against regression + check_label_quality(labels) + + +@pytest.mark.parametrize("algo", ALGORITHMS) +@pytest.mark.parametrize("metric", _VALID_METRICS) +def test_hdbscan_algorithms(algo, metric): + """ + Tests that HDBSCAN works with the expected combinations of algorithms and + metrics, or raises the expected errors. 
+ """ + labels = HDBSCAN(algorithm=algo).fit_predict(X) + check_label_quality(labels) + + # Validation for brute is handled by `pairwise_distances` + if algo in ("brute", "auto"): + return + + ALGOS_TREES = { + "kd_tree": KDTree, + "ball_tree": BallTree, + } + metric_params = { + "mahalanobis": {"V": np.eye(X.shape[1])}, + "seuclidean": {"V": np.ones(X.shape[1])}, + "minkowski": {"p": 2}, + "wminkowski": {"p": 2, "w": np.ones(X.shape[1])}, + }.get(metric, None) + + hdb = HDBSCAN( + algorithm=algo, + metric=metric, + metric_params=metric_params, + ) + + if metric not in ALGOS_TREES[algo].valid_metrics: + with pytest.raises(ValueError): + hdb.fit(X) + elif metric == "wminkowski": + with pytest.warns(FutureWarning): + hdb.fit(X) + else: + hdb.fit(X) + + +def test_dbscan_clustering(): + """ + Tests that HDBSCAN can generate a sufficiently accurate dbscan clustering. + This test is more of a sanity check than a rigorous evaluation. + """ + clusterer = HDBSCAN().fit(X) + labels = clusterer.dbscan_clustering(0.3) + + # We use a looser threshold due to dbscan producing a more constrained + # clustering representation + check_label_quality(labels, threshold=0.92) + + +@pytest.mark.parametrize("cut_distance", (0.1, 0.5, 1)) +def test_dbscan_clustering_outlier_data(cut_distance): + """ + Tests if np.inf and np.nan data are each treated as special outliers. + """ + missing_label = _OUTLIER_ENCODING["missing"]["label"] + infinite_label = _OUTLIER_ENCODING["infinite"]["label"] + + X_outlier = X.copy() + X_outlier[0] = [np.inf, 1] + X_outlier[2] = [1, np.nan] + X_outlier[5] = [np.inf, np.nan] + model = HDBSCAN().fit(X_outlier) + labels = model.dbscan_clustering(cut_distance=cut_distance) + + missing_labels_idx = np.flatnonzero(labels == missing_label) + assert_array_equal(missing_labels_idx, [2, 5]) + + infinite_labels_idx = np.flatnonzero(labels == infinite_label) + assert_array_equal(infinite_labels_idx, [0]) + + clean_idx = list(set(range(200)) - set(missing_labels_idx + infinite_labels_idx)) + clean_model = HDBSCAN().fit(X_outlier[clean_idx]) + clean_labels = clean_model.dbscan_clustering(cut_distance=cut_distance) + assert_array_equal(clean_labels, labels[clean_idx]) + + +def test_hdbscan_best_balltree_metric(): + """ + Tests that HDBSCAN using `BallTree` works. + """ + labels = HDBSCAN( + metric="seuclidean", metric_params={"V": np.ones(X.shape[1])} + ).fit_predict(X) + check_label_quality(labels) + + +def test_hdbscan_no_clusters(): + """ + Tests that HDBSCAN correctly does not generate a valid cluster when the + `min_cluster_size` is too large for the data. + """ + labels = HDBSCAN(min_cluster_size=len(X) - 1).fit_predict(X) + assert set(labels).issubset(OUTLIER_SET) + + +def test_hdbscan_min_cluster_size(): + """ + Test that the smallest non-noise cluster has at least `min_cluster_size` + many points + """ + for min_cluster_size in range(2, len(X), 1): + labels = HDBSCAN(min_cluster_size=min_cluster_size).fit_predict(X) + true_labels = [label for label in labels if label != -1] + if len(true_labels) != 0: + assert np.min(np.bincount(true_labels)) >= min_cluster_size + + +def test_hdbscan_callable_metric(): + """ + Tests that HDBSCAN works when passed a callable metric. 
+ """ + metric = distance.euclidean + labels = HDBSCAN(metric=metric).fit_predict(X) + check_label_quality(labels) + + +@pytest.mark.parametrize("tree", ["kd_tree", "ball_tree"]) +def test_hdbscan_precomputed_non_brute(tree): + """ + Tests that HDBSCAN correctly raises an error when passing precomputed data + while requesting a tree-based algorithm. + """ + hdb = HDBSCAN(metric="precomputed", algorithm=tree) + msg = "precomputed is not a valid metric for" + with pytest.raises(ValueError, match=msg): + hdb.fit(X) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_hdbscan_sparse(csr_container): + """ + Tests that HDBSCAN works correctly when passing sparse feature data. + Evaluates correctness by comparing against the same data passed as a dense + array. + """ + + dense_labels = HDBSCAN().fit(X).labels_ + check_label_quality(dense_labels) + + _X_sparse = csr_container(X) + X_sparse = _X_sparse.copy() + sparse_labels = HDBSCAN().fit(X_sparse).labels_ + assert_array_equal(dense_labels, sparse_labels) + + # Compare that the sparse and dense non-precomputed routines return the same labels + # where the 0th observation contains the outlier. + for outlier_val, outlier_type in ((np.inf, "infinite"), (np.nan, "missing")): + X_dense = X.copy() + X_dense[0, 0] = outlier_val + dense_labels = HDBSCAN().fit(X_dense).labels_ + check_label_quality(dense_labels) + assert dense_labels[0] == _OUTLIER_ENCODING[outlier_type]["label"] + + X_sparse = _X_sparse.copy() + X_sparse[0, 0] = outlier_val + sparse_labels = HDBSCAN().fit(X_sparse).labels_ + assert_array_equal(dense_labels, sparse_labels) + + msg = "Sparse data matrices only support algorithm `brute`." + with pytest.raises(ValueError, match=msg): + HDBSCAN(metric="euclidean", algorithm="ball_tree").fit(X_sparse) + + +@pytest.mark.parametrize("algorithm", ALGORITHMS) +def test_hdbscan_centers(algorithm): + """ + Tests that HDBSCAN centers are calculated and stored properly, and are + accurate to the data. + """ + centers = [(0.0, 0.0), (3.0, 3.0)] + H, _ = make_blobs(n_samples=2000, random_state=0, centers=centers, cluster_std=0.5) + hdb = HDBSCAN(store_centers="both").fit(H) + + for center, centroid, medoid in zip(centers, hdb.centroids_, hdb.medoids_): + assert_allclose(center, centroid, rtol=1, atol=0.05) + assert_allclose(center, medoid, rtol=1, atol=0.05) + + # Ensure that nothing is done for noise + hdb = HDBSCAN( + algorithm=algorithm, store_centers="both", min_cluster_size=X.shape[0] + ).fit(X) + assert hdb.centroids_.shape[0] == 0 + assert hdb.medoids_.shape[0] == 0 + + +def test_hdbscan_allow_single_cluster_with_epsilon(): + """ + Tests that HDBSCAN single-cluster selection with epsilon works correctly. + """ + rng = np.random.RandomState(0) + no_structure = rng.rand(150, 2) + # without epsilon we should see many noise points as children of root. + labels = HDBSCAN( + min_cluster_size=5, + cluster_selection_epsilon=0.0, + cluster_selection_method="eom", + allow_single_cluster=True, + ).fit_predict(no_structure) + unique_labels, counts = np.unique(labels, return_counts=True) + assert len(unique_labels) == 2 + + # Arbitrary heuristic. Would prefer something more precise. + assert counts[unique_labels == -1] > 30 + + # for this random seed an epsilon of 0.18 will produce exactly 2 noise + # points at that cut in single linkage. 
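The call that follows exercises `cluster_selection_epsilon`, which acts as a DBSCAN-like cut: clusters that would be split below this distance are kept merged, so raising it typically absorbs points that would otherwise be labelled noise. A rough standalone illustration (not part of the test; the exact noise counts depend on the random seed):

```python
import numpy as np
from sklearn.cluster import HDBSCAN

rng = np.random.RandomState(0)
no_structure = rng.rand(150, 2)

for eps in (0.0, 0.18):
    labels = HDBSCAN(
        min_cluster_size=5,
        cluster_selection_epsilon=eps,
        allow_single_cluster=True,
    ).fit_predict(no_structure)
    # Larger epsilon values should leave fewer points labelled as noise (-1).
    print(f"epsilon={eps}: {int((labels == -1).sum())} noise points")
```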
+    labels = HDBSCAN(
+        min_cluster_size=5,
+        cluster_selection_epsilon=0.18,
+        cluster_selection_method="eom",
+        allow_single_cluster=True,
+        algorithm="kd_tree",
+    ).fit_predict(no_structure)
+    unique_labels, counts = np.unique(labels, return_counts=True)
+    assert len(unique_labels) == 2
+    assert counts[unique_labels == -1] == 2
+
+
+def test_hdbscan_better_than_dbscan():
+    """
+    Validate that HDBSCAN can properly cluster this difficult synthetic
+    dataset. Note that DBSCAN fails on this (see HDBSCAN plotting
+    example)
+    """
+    centers = [[-0.85, -0.85], [-0.85, 0.85], [3, 3], [3, -3]]
+    X, y = make_blobs(
+        n_samples=750,
+        centers=centers,
+        cluster_std=[0.2, 0.35, 1.35, 1.35],
+        random_state=0,
+    )
+    labels = HDBSCAN().fit(X).labels_
+
+    n_clusters = len(set(labels)) - int(-1 in labels)
+    assert n_clusters == 4
+    assert fowlkes_mallows_score(labels, y) > 0.99
+
+
+@pytest.mark.parametrize(
+    "kwargs, X",
+    [
+        ({"metric": "precomputed"}, np.array([[1, np.inf], [np.inf, 1]])),
+        ({"metric": "precomputed"}, [[1, 2], [2, 1]]),
+        ({}, [[1, 2], [3, 4]]),
+    ],
+)
+def test_hdbscan_usable_inputs(X, kwargs):
+    """
+    Tests that HDBSCAN works correctly for array-likes and precomputed inputs
+    with non-finite points.
+    """
+    HDBSCAN(min_samples=1, **kwargs).fit(X)
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_hdbscan_sparse_distances_too_few_nonzero(csr_container):
+    """
+    Tests that HDBSCAN raises the correct error when there are too few
+    non-zero distances.
+    """
+    X = csr_container(np.zeros((10, 10)))
+
+    msg = "There exists points with fewer than"
+    with pytest.raises(ValueError, match=msg):
+        HDBSCAN(metric="precomputed").fit(X)
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_hdbscan_sparse_distances_disconnected_graph(csr_container):
+    """
+    Tests that HDBSCAN raises the correct error when the distance matrix
+    has multiple connected components.
+    """
+    # Create symmetric sparse matrix with 2 connected components
+    X = np.zeros((20, 20))
+    X[:5, :5] = 1
+    X[5:, 15:] = 1
+    X = X + X.T
+    X = csr_container(X)
+    msg = "HDBSCAN cannot be perfomed on a disconnected graph"
+    with pytest.raises(ValueError, match=msg):
+        HDBSCAN(metric="precomputed").fit(X)
+
+
+def test_hdbscan_tree_invalid_metric():
+    """
+    Tests that HDBSCAN correctly raises an error for invalid metric choices.
+    """
+    metric_callable = lambda x: x
+    msg = (
+        ".* is not a valid metric for a .*-based algorithm\\. Please select a different"
+        " metric\\."
+    )
+
+    # Callables are not supported for either
+    with pytest.raises(ValueError, match=msg):
+        HDBSCAN(algorithm="kd_tree", metric=metric_callable).fit(X)
+    with pytest.raises(ValueError, match=msg):
+        HDBSCAN(algorithm="ball_tree", metric=metric_callable).fit(X)
+
+    # The set of valid metrics for KDTree at the time of writing this test is a
+    # strict subset of those supported in BallTree
+    metrics_not_kd = list(set(BallTree.valid_metrics) - set(KDTree.valid_metrics))
+    if len(metrics_not_kd) > 0:
+        with pytest.raises(ValueError, match=msg):
+            HDBSCAN(algorithm="kd_tree", metric=metrics_not_kd[0]).fit(X)
+
+
+def test_hdbscan_too_many_min_samples():
+    """
+    Tests that HDBSCAN correctly raises an error when setting `min_samples`
+    larger than the number of samples.
+ """ + hdb = HDBSCAN(min_samples=len(X) + 1) + msg = r"min_samples (.*) must be at most" + with pytest.raises(ValueError, match=msg): + hdb.fit(X) + + +def test_hdbscan_precomputed_dense_nan(): + """ + Tests that HDBSCAN correctly raises an error when providing precomputed + distances with `np.nan` values. + """ + X_nan = X.copy() + X_nan[0, 0] = np.nan + msg = "np.nan values found in precomputed-dense" + hdb = HDBSCAN(metric="precomputed") + with pytest.raises(ValueError, match=msg): + hdb.fit(X_nan) + + +@pytest.mark.parametrize("allow_single_cluster", [True, False]) +@pytest.mark.parametrize("epsilon", [0, 0.1]) +def test_labelling_distinct(global_random_seed, allow_single_cluster, epsilon): + """ + Tests that the `_do_labelling` helper function correctly assigns labels. + """ + n_samples = 48 + X, y = make_blobs( + n_samples, + random_state=global_random_seed, + # Ensure the clusters are distinct with no overlap + centers=[ + [0, 0], + [10, 0], + [0, 10], + ], + ) + + est = HDBSCAN().fit(X) + condensed_tree = _condense_tree( + est._single_linkage_tree_, min_cluster_size=est.min_cluster_size + ) + clusters = {n_samples + 2, n_samples + 3, n_samples + 4} + cluster_label_map = {n_samples + 2: 0, n_samples + 3: 1, n_samples + 4: 2} + labels = _do_labelling( + condensed_tree=condensed_tree, + clusters=clusters, + cluster_label_map=cluster_label_map, + allow_single_cluster=allow_single_cluster, + cluster_selection_epsilon=epsilon, + ) + + first_with_label = {_y: np.where(y == _y)[0][0] for _y in list(set(y))} + y_to_labels = {_y: labels[first_with_label[_y]] for _y in list(set(y))} + aligned_target = np.vectorize(y_to_labels.get)(y) + assert_array_equal(labels, aligned_target) + + +def test_labelling_thresholding(): + """ + Tests that the `_do_labelling` helper function correctly thresholds the + incoming lambda values given various `cluster_selection_epsilon` values. + """ + n_samples = 5 + MAX_LAMBDA = 1.5 + condensed_tree = np.array( + [ + (5, 2, MAX_LAMBDA, 1), + (5, 1, 0.1, 1), + (5, 0, MAX_LAMBDA, 1), + (5, 3, 0.2, 1), + (5, 4, 0.3, 1), + ], + dtype=CONDENSED_dtype, + ) + labels = _do_labelling( + condensed_tree=condensed_tree, + clusters={n_samples}, + cluster_label_map={n_samples: 0, n_samples + 1: 1}, + allow_single_cluster=True, + cluster_selection_epsilon=1, + ) + num_noise = condensed_tree["value"] < 1 + assert sum(num_noise) == sum(labels == -1) + + labels = _do_labelling( + condensed_tree=condensed_tree, + clusters={n_samples}, + cluster_label_map={n_samples: 0, n_samples + 1: 1}, + allow_single_cluster=True, + cluster_selection_epsilon=0, + ) + # The threshold should be calculated per-sample based on the largest + # lambda of any simbling node. In this case, all points are siblings + # and the largest value is exactly MAX_LAMBDA. + num_noise = condensed_tree["value"] < MAX_LAMBDA + assert sum(num_noise) == sum(labels == -1) + + +# TODO(1.6): Remove +def test_hdbscan_warning_on_deprecated_algorithm_name(): + # Test that warning message is shown when algorithm='kdtree' + msg = ( + "`algorithm='kdtree'`has been deprecated in 1.4 and will be renamed" + " to'kd_tree'`in 1.6. To keep the past behaviour, set `algorithm='kd_tree'`." + ) + with pytest.warns(FutureWarning, match=msg): + HDBSCAN(algorithm="kdtree").fit(X) + + # Test that warning message is shown when algorithm='balltree' + msg = ( + "`algorithm='balltree'`has been deprecated in 1.4 and will be renamed" + " to'ball_tree'`in 1.6. To keep the past behaviour, set" + " `algorithm='ball_tree'`." 
+ ) + with pytest.warns(FutureWarning, match=msg): + HDBSCAN(algorithm="balltree").fit(X) + + +@pytest.mark.parametrize("store_centers", ["centroid", "medoid"]) +def test_hdbscan_error_precomputed_and_store_centers(store_centers): + """Check that we raise an error if the centers are requested together with + a precomputed input matrix. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27893 + """ + rng = np.random.RandomState(0) + X = rng.random((100, 2)) + X_dist = euclidean_distances(X) + err_msg = "Cannot store centers when using a precomputed distance matrix." + with pytest.raises(ValueError, match=err_msg): + HDBSCAN(metric="precomputed", store_centers=store_centers).fit(X_dist) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_hierarchical.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_hierarchical.py new file mode 100644 index 0000000000000000000000000000000000000000..3c99dd50ea85f5273c30628d44ac95340d72dd43 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_hierarchical.py @@ -0,0 +1,899 @@ +""" +Several basic tests for hierarchical clustering procedures + +""" +# Authors: Vincent Michel, 2010, Gael Varoquaux 2012, +# Matteo Visconti di Oleggio Castello 2014 +# License: BSD 3 clause +import itertools +import shutil +from functools import partial +from tempfile import mkdtemp + +import numpy as np +import pytest +from scipy.cluster import hierarchy +from scipy.sparse.csgraph import connected_components + +from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration, ward_tree +from sklearn.cluster._agglomerative import ( + _TREE_BUILDERS, + _fix_connectivity, + _hc_cut, + linkage_tree, +) +from sklearn.cluster._hierarchical_fast import ( + average_merge, + max_merge, + mst_linkage_core, +) +from sklearn.datasets import make_circles, make_moons +from sklearn.feature_extraction.image import grid_to_graph +from sklearn.metrics import DistanceMetric +from sklearn.metrics.cluster import adjusted_rand_score, normalized_mutual_info_score +from sklearn.metrics.pairwise import ( + PAIRED_DISTANCES, + cosine_distances, + manhattan_distances, + pairwise_distances, +) +from sklearn.metrics.tests.test_dist_metrics import METRICS_DEFAULT_PARAMS +from sklearn.neighbors import kneighbors_graph +from sklearn.utils._fast_dict import IntFloatDict +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + create_memmap_backed_data, + ignore_warnings, +) +from sklearn.utils.fixes import LIL_CONTAINERS + + +def test_linkage_misc(): + # Misc tests on linkage + rng = np.random.RandomState(42) + X = rng.normal(size=(5, 5)) + + with pytest.raises(ValueError): + linkage_tree(X, linkage="foo") + + with pytest.raises(ValueError): + linkage_tree(X, connectivity=np.ones((4, 4))) + + # Smoke test FeatureAgglomeration + FeatureAgglomeration().fit(X) + + # test hierarchical clustering on a precomputed distances matrix + dis = cosine_distances(X) + + res = linkage_tree(dis, affinity="precomputed") + assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0]) + + # test hierarchical clustering on a precomputed distances matrix + res = linkage_tree(X, affinity=manhattan_distances) + assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0]) + + +def test_structured_linkage_tree(): + # Check that we obtain the correct solution for structured linkage trees. 
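The node-count assertions in this and the following tests all rest on the same arithmetic: a full agglomerative merge tree over `n` leaves performs `n - 1` merges, hence `2 * n - 1` nodes overall (the tests cluster the columns of `X`, which is why `2 * X.shape[1] - 1` appears). A standalone sketch over rows, assuming a tiny random dataset (not part of the diff):

```python
import numpy as np
from sklearn.cluster import ward_tree

rng = np.random.RandomState(0)
X = rng.randn(6, 4)  # 6 samples to merge

children, n_connected_components, n_leaves, parents = ward_tree(X)

# n - 1 merge (internal) nodes plus n leaves gives 2 * n - 1 nodes in total.
assert n_leaves == 6
assert children.shape[0] == n_leaves - 1
assert children.shape[0] + n_leaves == 2 * n_leaves - 1
```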
+ rng = np.random.RandomState(0) + mask = np.ones([10, 10], dtype=bool) + # Avoiding a mask with only 'True' entries + mask[4:7, 4:7] = 0 + X = rng.randn(50, 100) + connectivity = grid_to_graph(*mask.shape) + for tree_builder in _TREE_BUILDERS.values(): + children, n_components, n_leaves, parent = tree_builder( + X.T, connectivity=connectivity + ) + n_nodes = 2 * X.shape[1] - 1 + assert len(children) + n_leaves == n_nodes + # Check that ward_tree raises a ValueError with a connectivity matrix + # of the wrong shape + with pytest.raises(ValueError): + tree_builder(X.T, connectivity=np.ones((4, 4))) + # Check that fitting with no samples raises an error + with pytest.raises(ValueError): + tree_builder(X.T[:0], connectivity=connectivity) + + +def test_unstructured_linkage_tree(): + # Check that we obtain the correct solution for unstructured linkage trees. + rng = np.random.RandomState(0) + X = rng.randn(50, 100) + for this_X in (X, X[0]): + # With specified a number of clusters just for the sake of + # raising a warning and testing the warning code + with ignore_warnings(): + with pytest.warns(UserWarning): + children, n_nodes, n_leaves, parent = ward_tree(this_X.T, n_clusters=10) + n_nodes = 2 * X.shape[1] - 1 + assert len(children) + n_leaves == n_nodes + + for tree_builder in _TREE_BUILDERS.values(): + for this_X in (X, X[0]): + with ignore_warnings(): + with pytest.warns(UserWarning): + children, n_nodes, n_leaves, parent = tree_builder( + this_X.T, n_clusters=10 + ) + n_nodes = 2 * X.shape[1] - 1 + assert len(children) + n_leaves == n_nodes + + +def test_height_linkage_tree(): + # Check that the height of the results of linkage tree is sorted. + rng = np.random.RandomState(0) + mask = np.ones([10, 10], dtype=bool) + X = rng.randn(50, 100) + connectivity = grid_to_graph(*mask.shape) + for linkage_func in _TREE_BUILDERS.values(): + children, n_nodes, n_leaves, parent = linkage_func( + X.T, connectivity=connectivity + ) + n_nodes = 2 * X.shape[1] - 1 + assert len(children) + n_leaves == n_nodes + + +def test_zero_cosine_linkage_tree(): + # Check that zero vectors in X produce an error when + # 'cosine' affinity is used + X = np.array([[0, 1], [0, 0]]) + msg = "Cosine affinity cannot be used when X contains zero vectors" + with pytest.raises(ValueError, match=msg): + linkage_tree(X, affinity="cosine") + + +@pytest.mark.parametrize("n_clusters, distance_threshold", [(None, 0.5), (10, None)]) +@pytest.mark.parametrize("compute_distances", [True, False]) +@pytest.mark.parametrize("linkage", ["ward", "complete", "average", "single"]) +def test_agglomerative_clustering_distances( + n_clusters, compute_distances, distance_threshold, linkage +): + # Check that when `compute_distances` is True or `distance_threshold` is + # given, the fitted model has an attribute `distances_`. 
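The branch below checks the `distances_` attribute, which stores one merge distance per row of `children_` whenever `compute_distances=True` or a `distance_threshold` is given. A standalone sketch of that shape relationship (hypothetical sizes, not part of the test):

```python
import numpy as np
from sklearn.cluster import AgglomerativeClustering

rng = np.random.RandomState(0)
X = rng.randn(20, 3)

model = AgglomerativeClustering(n_clusters=3, compute_distances=True).fit(X)

# One distance is recorded per merge, i.e. per row of children_.
assert model.distances_.shape == (model.children_.shape[0],)
# For unconstrained ward linkage the merge distances are non-decreasing.
assert np.all(np.diff(model.distances_) >= 0)
```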
+ rng = np.random.RandomState(0) + mask = np.ones([10, 10], dtype=bool) + n_samples = 100 + X = rng.randn(n_samples, 50) + connectivity = grid_to_graph(*mask.shape) + + clustering = AgglomerativeClustering( + n_clusters=n_clusters, + connectivity=connectivity, + linkage=linkage, + distance_threshold=distance_threshold, + compute_distances=compute_distances, + ) + clustering.fit(X) + if compute_distances or (distance_threshold is not None): + assert hasattr(clustering, "distances_") + n_children = clustering.children_.shape[0] + n_nodes = n_children + 1 + assert clustering.distances_.shape == (n_nodes - 1,) + else: + assert not hasattr(clustering, "distances_") + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_agglomerative_clustering(global_random_seed, lil_container): + # Check that we obtain the correct number of clusters with + # agglomerative clustering. + rng = np.random.RandomState(global_random_seed) + mask = np.ones([10, 10], dtype=bool) + n_samples = 100 + X = rng.randn(n_samples, 50) + connectivity = grid_to_graph(*mask.shape) + for linkage in ("ward", "complete", "average", "single"): + clustering = AgglomerativeClustering( + n_clusters=10, connectivity=connectivity, linkage=linkage + ) + clustering.fit(X) + # test caching + try: + tempdir = mkdtemp() + clustering = AgglomerativeClustering( + n_clusters=10, + connectivity=connectivity, + memory=tempdir, + linkage=linkage, + ) + clustering.fit(X) + labels = clustering.labels_ + assert np.size(np.unique(labels)) == 10 + finally: + shutil.rmtree(tempdir) + # Turn caching off now + clustering = AgglomerativeClustering( + n_clusters=10, connectivity=connectivity, linkage=linkage + ) + # Check that we obtain the same solution with early-stopping of the + # tree building + clustering.compute_full_tree = False + clustering.fit(X) + assert_almost_equal(normalized_mutual_info_score(clustering.labels_, labels), 1) + clustering.connectivity = None + clustering.fit(X) + assert np.size(np.unique(clustering.labels_)) == 10 + # Check that we raise a TypeError on dense matrices + clustering = AgglomerativeClustering( + n_clusters=10, + connectivity=lil_container(connectivity.toarray()[:10, :10]), + linkage=linkage, + ) + with pytest.raises(ValueError): + clustering.fit(X) + + # Test that using ward with another metric than euclidean raises an + # exception + clustering = AgglomerativeClustering( + n_clusters=10, + connectivity=connectivity.toarray(), + metric="manhattan", + linkage="ward", + ) + with pytest.raises(ValueError): + clustering.fit(X) + + # Test using another metric than euclidean works with linkage complete + for metric in PAIRED_DISTANCES.keys(): + # Compare our (structured) implementation to scipy + clustering = AgglomerativeClustering( + n_clusters=10, + connectivity=np.ones((n_samples, n_samples)), + metric=metric, + linkage="complete", + ) + clustering.fit(X) + clustering2 = AgglomerativeClustering( + n_clusters=10, connectivity=None, metric=metric, linkage="complete" + ) + clustering2.fit(X) + assert_almost_equal( + normalized_mutual_info_score(clustering2.labels_, clustering.labels_), 1 + ) + + # Test that using a distance matrix (affinity = 'precomputed') has same + # results (with connectivity constraints) + clustering = AgglomerativeClustering( + n_clusters=10, connectivity=connectivity, linkage="complete" + ) + clustering.fit(X) + X_dist = pairwise_distances(X) + clustering2 = AgglomerativeClustering( + n_clusters=10, + connectivity=connectivity, + metric="precomputed", + linkage="complete", + ) + 
clustering2.fit(X_dist) + assert_array_equal(clustering.labels_, clustering2.labels_) + + +def test_agglomerative_clustering_memory_mapped(): + """AgglomerativeClustering must work on mem-mapped dataset. + + Non-regression test for issue #19875. + """ + rng = np.random.RandomState(0) + Xmm = create_memmap_backed_data(rng.randn(50, 100)) + AgglomerativeClustering(metric="euclidean", linkage="single").fit(Xmm) + + +def test_ward_agglomeration(global_random_seed): + # Check that we obtain the correct solution in a simplistic case + rng = np.random.RandomState(global_random_seed) + mask = np.ones([10, 10], dtype=bool) + X = rng.randn(50, 100) + connectivity = grid_to_graph(*mask.shape) + agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity) + agglo.fit(X) + assert np.size(np.unique(agglo.labels_)) == 5 + + X_red = agglo.transform(X) + assert X_red.shape[1] == 5 + X_full = agglo.inverse_transform(X_red) + assert np.unique(X_full[0]).size == 5 + assert_array_almost_equal(agglo.transform(X_full), X_red) + + # Check that fitting with no samples raises a ValueError + with pytest.raises(ValueError): + agglo.fit(X[:0]) + + +def test_single_linkage_clustering(): + # Check that we get the correct result in two emblematic cases + moons, moon_labels = make_moons(noise=0.05, random_state=42) + clustering = AgglomerativeClustering(n_clusters=2, linkage="single") + clustering.fit(moons) + assert_almost_equal( + normalized_mutual_info_score(clustering.labels_, moon_labels), 1 + ) + + circles, circle_labels = make_circles(factor=0.5, noise=0.025, random_state=42) + clustering = AgglomerativeClustering(n_clusters=2, linkage="single") + clustering.fit(circles) + assert_almost_equal( + normalized_mutual_info_score(clustering.labels_, circle_labels), 1 + ) + + +def assess_same_labelling(cut1, cut2): + """Util for comparison with scipy""" + co_clust = [] + for cut in [cut1, cut2]: + n = len(cut) + k = cut.max() + 1 + ecut = np.zeros((n, k)) + ecut[np.arange(n), cut] = 1 + co_clust.append(np.dot(ecut, ecut.T)) + assert (co_clust[0] == co_clust[1]).all() + + +def test_sparse_scikit_vs_scipy(global_random_seed): + # Test scikit linkage with full connectivity (i.e. 
unstructured) vs scipy + n, p, k = 10, 5, 3 + rng = np.random.RandomState(global_random_seed) + + # Not using a lil_matrix here, just to check that non sparse + # matrices are well handled + connectivity = np.ones((n, n)) + for linkage in _TREE_BUILDERS.keys(): + for i in range(5): + X = 0.1 * rng.normal(size=(n, p)) + X -= 4.0 * np.arange(n)[:, np.newaxis] + X -= X.mean(axis=1)[:, np.newaxis] + + out = hierarchy.linkage(X, method=linkage) + + children_ = out[:, :2].astype(int, copy=False) + children, _, n_leaves, _ = _TREE_BUILDERS[linkage]( + X, connectivity=connectivity + ) + + # Sort the order of child nodes per row for consistency + children.sort(axis=1) + assert_array_equal( + children, + children_, + "linkage tree differs from scipy impl for linkage: " + linkage, + ) + + cut = _hc_cut(k, children, n_leaves) + cut_ = _hc_cut(k, children_, n_leaves) + assess_same_labelling(cut, cut_) + + # Test error management in _hc_cut + with pytest.raises(ValueError): + _hc_cut(n_leaves + 1, children, n_leaves) + + +# Make sure our custom mst_linkage_core gives +# the same results as scipy's builtin +def test_vector_scikit_single_vs_scipy_single(global_random_seed): + n_samples, n_features, n_clusters = 10, 5, 3 + rng = np.random.RandomState(global_random_seed) + X = 0.1 * rng.normal(size=(n_samples, n_features)) + X -= 4.0 * np.arange(n_samples)[:, np.newaxis] + X -= X.mean(axis=1)[:, np.newaxis] + + out = hierarchy.linkage(X, method="single") + children_scipy = out[:, :2].astype(int) + + children, _, n_leaves, _ = _TREE_BUILDERS["single"](X) + + # Sort the order of child nodes per row for consistency + children.sort(axis=1) + assert_array_equal( + children, + children_scipy, + "linkage tree differs from scipy impl for single linkage.", + ) + + cut = _hc_cut(n_clusters, children, n_leaves) + cut_scipy = _hc_cut(n_clusters, children_scipy, n_leaves) + assess_same_labelling(cut, cut_scipy) + + +@pytest.mark.parametrize("metric_param_grid", METRICS_DEFAULT_PARAMS) +def test_mst_linkage_core_memory_mapped(metric_param_grid): + """The MST-LINKAGE-CORE algorithm must work on mem-mapped dataset. + + Non-regression test for issue #19875. + """ + rng = np.random.RandomState(seed=1) + X = rng.normal(size=(20, 4)) + Xmm = create_memmap_backed_data(X) + metric, param_grid = metric_param_grid + keys = param_grid.keys() + for vals in itertools.product(*param_grid.values()): + kwargs = dict(zip(keys, vals)) + distance_metric = DistanceMetric.get_metric(metric, **kwargs) + mst = mst_linkage_core(X, distance_metric) + mst_mm = mst_linkage_core(Xmm, distance_metric) + np.testing.assert_equal(mst, mst_mm) + + +def test_identical_points(): + # Ensure identical points are handled correctly when using mst with + # a sparse connectivity matrix + X = np.array([[0, 0, 0], [0, 0, 0], [1, 1, 1], [1, 1, 1], [2, 2, 2], [2, 2, 2]]) + true_labels = np.array([0, 0, 1, 1, 2, 2]) + connectivity = kneighbors_graph(X, n_neighbors=3, include_self=False) + connectivity = 0.5 * (connectivity + connectivity.T) + connectivity, n_components = _fix_connectivity(X, connectivity, "euclidean") + + for linkage in ("single", "average", "average", "ward"): + clustering = AgglomerativeClustering( + n_clusters=3, linkage=linkage, connectivity=connectivity + ) + clustering.fit(X) + + assert_almost_equal( + normalized_mutual_info_score(clustering.labels_, true_labels), 1 + ) + + +def test_connectivity_propagation(): + # Check that connectivity in the ward tree is propagated correctly during + # merging. 
+ X = np.array( + [ + (0.014, 0.120), + (0.014, 0.099), + (0.014, 0.097), + (0.017, 0.153), + (0.017, 0.153), + (0.018, 0.153), + (0.018, 0.153), + (0.018, 0.153), + (0.018, 0.153), + (0.018, 0.153), + (0.018, 0.153), + (0.018, 0.153), + (0.018, 0.152), + (0.018, 0.149), + (0.018, 0.144), + ] + ) + connectivity = kneighbors_graph(X, 10, include_self=False) + ward = AgglomerativeClustering( + n_clusters=4, connectivity=connectivity, linkage="ward" + ) + # If changes are not propagated correctly, fit crashes with an + # IndexError + ward.fit(X) + + +def test_ward_tree_children_order(global_random_seed): + # Check that children are ordered in the same way for both structured and + # unstructured versions of ward_tree. + + # test on five random datasets + n, p = 10, 5 + rng = np.random.RandomState(global_random_seed) + + connectivity = np.ones((n, n)) + for i in range(5): + X = 0.1 * rng.normal(size=(n, p)) + X -= 4.0 * np.arange(n)[:, np.newaxis] + X -= X.mean(axis=1)[:, np.newaxis] + + out_unstructured = ward_tree(X) + out_structured = ward_tree(X, connectivity=connectivity) + + assert_array_equal(out_unstructured[0], out_structured[0]) + + +def test_ward_linkage_tree_return_distance(global_random_seed): + # Test return_distance option on linkage and ward trees + + # test that return_distance when set true, gives same + # output on both structured and unstructured clustering. + n, p = 10, 5 + rng = np.random.RandomState(global_random_seed) + + connectivity = np.ones((n, n)) + for i in range(5): + X = 0.1 * rng.normal(size=(n, p)) + X -= 4.0 * np.arange(n)[:, np.newaxis] + X -= X.mean(axis=1)[:, np.newaxis] + + out_unstructured = ward_tree(X, return_distance=True) + out_structured = ward_tree(X, connectivity=connectivity, return_distance=True) + + # get children + children_unstructured = out_unstructured[0] + children_structured = out_structured[0] + + # check if we got the same clusters + assert_array_equal(children_unstructured, children_structured) + + # check if the distances are the same + dist_unstructured = out_unstructured[-1] + dist_structured = out_structured[-1] + + assert_array_almost_equal(dist_unstructured, dist_structured) + + for linkage in ["average", "complete", "single"]: + structured_items = linkage_tree( + X, connectivity=connectivity, linkage=linkage, return_distance=True + )[-1] + unstructured_items = linkage_tree(X, linkage=linkage, return_distance=True)[ + -1 + ] + structured_dist = structured_items[-1] + unstructured_dist = unstructured_items[-1] + structured_children = structured_items[0] + unstructured_children = unstructured_items[0] + assert_array_almost_equal(structured_dist, unstructured_dist) + assert_array_almost_equal(structured_children, unstructured_children) + + # test on the following dataset where we know the truth + # taken from scipy/cluster/tests/hierarchy_test_data.py + X = np.array( + [ + [1.43054825, -7.5693489], + [6.95887839, 6.82293382], + [2.87137846, -9.68248579], + [7.87974764, -6.05485803], + [8.24018364, -6.09495602], + [7.39020262, 8.54004355], + ] + ) + # truth + linkage_X_ward = np.array( + [ + [3.0, 4.0, 0.36265956, 2.0], + [1.0, 5.0, 1.77045373, 2.0], + [0.0, 2.0, 2.55760419, 2.0], + [6.0, 8.0, 9.10208346, 4.0], + [7.0, 9.0, 24.7784379, 6.0], + ] + ) + + linkage_X_complete = np.array( + [ + [3.0, 4.0, 0.36265956, 2.0], + [1.0, 5.0, 1.77045373, 2.0], + [0.0, 2.0, 2.55760419, 2.0], + [6.0, 8.0, 6.96742194, 4.0], + [7.0, 9.0, 18.77445997, 6.0], + ] + ) + + linkage_X_average = np.array( + [ + [3.0, 4.0, 0.36265956, 2.0], + [1.0, 5.0, 
1.77045373, 2.0], + [0.0, 2.0, 2.55760419, 2.0], + [6.0, 8.0, 6.55832839, 4.0], + [7.0, 9.0, 15.44089605, 6.0], + ] + ) + + n_samples, n_features = np.shape(X) + connectivity_X = np.ones((n_samples, n_samples)) + + out_X_unstructured = ward_tree(X, return_distance=True) + out_X_structured = ward_tree(X, connectivity=connectivity_X, return_distance=True) + + # check that the labels are the same + assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0]) + assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0]) + + # check that the distances are correct + assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4]) + assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4]) + + linkage_options = ["complete", "average", "single"] + X_linkage_truth = [linkage_X_complete, linkage_X_average] + for linkage, X_truth in zip(linkage_options, X_linkage_truth): + out_X_unstructured = linkage_tree(X, return_distance=True, linkage=linkage) + out_X_structured = linkage_tree( + X, connectivity=connectivity_X, linkage=linkage, return_distance=True + ) + + # check that the labels are the same + assert_array_equal(X_truth[:, :2], out_X_unstructured[0]) + assert_array_equal(X_truth[:, :2], out_X_structured[0]) + + # check that the distances are correct + assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4]) + assert_array_almost_equal(X_truth[:, 2], out_X_structured[4]) + + +def test_connectivity_fixing_non_lil(): + # Check non regression of a bug if a non item assignable connectivity is + # provided with more than one component. + # create dummy data + x = np.array([[0, 0], [1, 1]]) + # create a mask with several components to force connectivity fixing + m = np.array([[True, False], [False, True]]) + c = grid_to_graph(n_x=2, n_y=2, mask=m) + w = AgglomerativeClustering(connectivity=c, linkage="ward") + with pytest.warns(UserWarning): + w.fit(x) + + +def test_int_float_dict(): + rng = np.random.RandomState(0) + keys = np.unique(rng.randint(100, size=10).astype(np.intp, copy=False)) + values = rng.rand(len(keys)) + + d = IntFloatDict(keys, values) + for key, value in zip(keys, values): + assert d[key] == value + + other_keys = np.arange(50, dtype=np.intp)[::2] + other_values = np.full(50, 0.5)[::2] + other = IntFloatDict(other_keys, other_values) + # Complete smoke test + max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1) + average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1) + + +def test_connectivity_callable(): + rng = np.random.RandomState(0) + X = rng.rand(20, 5) + connectivity = kneighbors_graph(X, 3, include_self=False) + aglc1 = AgglomerativeClustering(connectivity=connectivity) + aglc2 = AgglomerativeClustering( + connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False) + ) + aglc1.fit(X) + aglc2.fit(X) + assert_array_equal(aglc1.labels_, aglc2.labels_) + + +def test_connectivity_ignores_diagonal(): + rng = np.random.RandomState(0) + X = rng.rand(20, 5) + connectivity = kneighbors_graph(X, 3, include_self=False) + connectivity_include_self = kneighbors_graph(X, 3, include_self=True) + aglc1 = AgglomerativeClustering(connectivity=connectivity) + aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self) + aglc1.fit(X) + aglc2.fit(X) + assert_array_equal(aglc1.labels_, aglc2.labels_) + + +def test_compute_full_tree(): + # Test that the full tree is computed if n_clusters is small + rng = np.random.RandomState(0) + X = rng.randn(10, 2) + connectivity = kneighbors_graph(X, 5, include_self=False) 
+
+    # When n_clusters is small, the full tree should be built,
+    # that is, the number of merges should be n_samples - 1
+    agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
+    agc.fit(X)
+    n_samples = X.shape[0]
+    n_nodes = agc.children_.shape[0]
+    assert n_nodes == n_samples - 1
+
+    # When n_clusters is large (greater than the max of 100 and 0.02 * n_samples),
+    # tree building should stop early, once n_clusters clusters remain.
+    n_clusters = 101
+    X = rng.randn(200, 2)
+    connectivity = kneighbors_graph(X, 10, include_self=False)
+    agc = AgglomerativeClustering(n_clusters=n_clusters, connectivity=connectivity)
+    agc.fit(X)
+    n_samples = X.shape[0]
+    n_nodes = agc.children_.shape[0]
+    assert n_nodes == n_samples - n_clusters
+
+
+def test_n_components():
+    # Test the n_components returned by the linkage, average and ward trees
+    rng = np.random.RandomState(0)
+    X = rng.rand(5, 5)
+
+    # Connectivity matrix having five components.
+    connectivity = np.eye(5)
+
+    for linkage_func in _TREE_BUILDERS.values():
+        assert ignore_warnings(linkage_func)(X, connectivity=connectivity)[1] == 5
+
+
+def test_affinity_passed_to_fix_connectivity():
+    # Test that the affinity parameter is actually passed to the pairwise
+    # function
+
+    size = 2
+    rng = np.random.RandomState(0)
+    X = rng.randn(size, size)
+    mask = np.array([True, False, False, True])
+
+    connectivity = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
+
+    class FakeAffinity:
+        def __init__(self):
+            self.counter = 0
+
+        def increment(self, *args, **kwargs):
+            self.counter += 1
+            return self.counter
+
+    fa = FakeAffinity()
+
+    linkage_tree(X, connectivity=connectivity, affinity=fa.increment)
+
+    assert fa.counter == 3
+
+
+@pytest.mark.parametrize("linkage", ["ward", "complete", "average"])
+def test_agglomerative_clustering_with_distance_threshold(linkage, global_random_seed):
+    # Check that we obtain the correct number of clusters with
+    # agglomerative clustering with distance_threshold.
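The body of this test builds on a simple relationship: cutting the merge tree at `distance_threshold` refuses every merge whose distance is at least the threshold, so the number of clusters equals the number of refused merges plus one. A standalone sketch of that identity (hypothetical data, not part of the diff):

```python
import numpy as np
from sklearn.cluster import AgglomerativeClustering

rng = np.random.RandomState(0)
X = rng.randn(30, 2)
threshold = 1.5

model = AgglomerativeClustering(
    n_clusters=None, distance_threshold=threshold, linkage="ward"
).fit(X)

# Every merge with distance >= threshold is not performed; each refusal leaves
# one extra cluster behind.
expected = np.count_nonzero(model.distances_ >= threshold) + 1
assert model.n_clusters_ == expected
```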
+ rng = np.random.RandomState(global_random_seed) + mask = np.ones([10, 10], dtype=bool) + n_samples = 100 + X = rng.randn(n_samples, 50) + connectivity = grid_to_graph(*mask.shape) + # test when distance threshold is set to 10 + distance_threshold = 10 + for conn in [None, connectivity]: + clustering = AgglomerativeClustering( + n_clusters=None, + distance_threshold=distance_threshold, + connectivity=conn, + linkage=linkage, + ) + clustering.fit(X) + clusters_produced = clustering.labels_ + num_clusters_produced = len(np.unique(clustering.labels_)) + # test if the clusters produced match the point in the linkage tree + # where the distance exceeds the threshold + tree_builder = _TREE_BUILDERS[linkage] + children, n_components, n_leaves, parent, distances = tree_builder( + X, connectivity=conn, n_clusters=None, return_distance=True + ) + num_clusters_at_threshold = ( + np.count_nonzero(distances >= distance_threshold) + 1 + ) + # test number of clusters produced + assert num_clusters_at_threshold == num_clusters_produced + # test clusters produced + clusters_at_threshold = _hc_cut( + n_clusters=num_clusters_produced, children=children, n_leaves=n_leaves + ) + assert np.array_equiv(clusters_produced, clusters_at_threshold) + + +def test_small_distance_threshold(global_random_seed): + rng = np.random.RandomState(global_random_seed) + n_samples = 10 + X = rng.randint(-300, 300, size=(n_samples, 3)) + # this should result in all data in their own clusters, given that + # their pairwise distances are bigger than .1 (which may not be the case + # with a different random seed). + clustering = AgglomerativeClustering( + n_clusters=None, distance_threshold=1.0, linkage="single" + ).fit(X) + # check that the pairwise distances are indeed all larger than .1 + all_distances = pairwise_distances(X, metric="minkowski", p=2) + np.fill_diagonal(all_distances, np.inf) + assert np.all(all_distances > 0.1) + assert clustering.n_clusters_ == n_samples + + +def test_cluster_distances_with_distance_threshold(global_random_seed): + rng = np.random.RandomState(global_random_seed) + n_samples = 100 + X = rng.randint(-10, 10, size=(n_samples, 3)) + # check the distances within the clusters and with other clusters + distance_threshold = 4 + clustering = AgglomerativeClustering( + n_clusters=None, distance_threshold=distance_threshold, linkage="single" + ).fit(X) + labels = clustering.labels_ + D = pairwise_distances(X, metric="minkowski", p=2) + # to avoid taking the 0 diagonal in min() + np.fill_diagonal(D, np.inf) + for label in np.unique(labels): + in_cluster_mask = labels == label + max_in_cluster_distance = ( + D[in_cluster_mask][:, in_cluster_mask].min(axis=0).max() + ) + min_out_cluster_distance = ( + D[in_cluster_mask][:, ~in_cluster_mask].min(axis=0).min() + ) + # single data point clusters only have that inf diagonal here + if in_cluster_mask.sum() > 1: + assert max_in_cluster_distance < distance_threshold + assert min_out_cluster_distance >= distance_threshold + + +@pytest.mark.parametrize("linkage", ["ward", "complete", "average"]) +@pytest.mark.parametrize( + ("threshold", "y_true"), [(0.5, [1, 0]), (1.0, [1, 0]), (1.5, [0, 0])] +) +def test_agglomerative_clustering_with_distance_threshold_edge_case( + linkage, threshold, y_true +): + # test boundary case of distance_threshold matching the distance + X = [[0], [1]] + clusterer = AgglomerativeClustering( + n_clusters=None, distance_threshold=threshold, linkage=linkage + ) + y_pred = clusterer.fit_predict(X) + assert adjusted_rand_score(y_true, y_pred) == 
1 + + +def test_dist_threshold_invalid_parameters(): + X = [[0], [1]] + with pytest.raises(ValueError, match="Exactly one of "): + AgglomerativeClustering(n_clusters=None, distance_threshold=None).fit(X) + + with pytest.raises(ValueError, match="Exactly one of "): + AgglomerativeClustering(n_clusters=2, distance_threshold=1).fit(X) + + X = [[0], [1]] + with pytest.raises(ValueError, match="compute_full_tree must be True if"): + AgglomerativeClustering( + n_clusters=None, distance_threshold=1, compute_full_tree=False + ).fit(X) + + +def test_invalid_shape_precomputed_dist_matrix(): + # Check that an error is raised when affinity='precomputed' + # and a non square matrix is passed (PR #16257). + rng = np.random.RandomState(0) + X = rng.rand(5, 3) + with pytest.raises( + ValueError, + match=r"Distance matrix should be square, got matrix of shape \(5, 3\)", + ): + AgglomerativeClustering(metric="precomputed", linkage="complete").fit(X) + + +def test_precomputed_connectivity_metric_with_2_connected_components(): + """Check that connecting components works when connectivity and + affinity are both precomputed and the number of connected components is + greater than 1. Non-regression test for #16151. + """ + + connectivity_matrix = np.array( + [ + [0, 1, 1, 0, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 1], + [0, 0, 0, 0, 0], + ] + ) + # ensure that connectivity_matrix has two connected components + assert connected_components(connectivity_matrix)[0] == 2 + + rng = np.random.RandomState(0) + X = rng.randn(5, 10) + + X_dist = pairwise_distances(X) + clusterer_precomputed = AgglomerativeClustering( + metric="precomputed", connectivity=connectivity_matrix, linkage="complete" + ) + msg = "Completing it to avoid stopping the tree early" + with pytest.warns(UserWarning, match=msg): + clusterer_precomputed.fit(X_dist) + + clusterer = AgglomerativeClustering( + connectivity=connectivity_matrix, linkage="complete" + ) + with pytest.warns(UserWarning, match=msg): + clusterer.fit(X) + + assert_array_equal(clusterer.labels_, clusterer_precomputed.labels_) + assert_array_equal(clusterer.children_, clusterer_precomputed.children_) + + +# TODO(1.6): remove in 1.6 +@pytest.mark.parametrize( + "Agglomeration", [AgglomerativeClustering, FeatureAgglomeration] +) +def test_deprecation_warning_metric_None(Agglomeration): + X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]]) + warn_msg = "`metric=None` is deprecated in version 1.4 and will be removed" + with pytest.warns(FutureWarning, match=warn_msg): + Agglomeration(metric=None).fit(X) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_k_means.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_k_means.py new file mode 100644 index 0000000000000000000000000000000000000000..4a112a30b29ed6edd5223b612d727c4784bce8e6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_k_means.py @@ -0,0 +1,1372 @@ +"""Testing for K-means""" +import re +import sys +from io import StringIO + +import numpy as np +import pytest +from scipy import sparse as sp + +from sklearn.base import clone +from sklearn.cluster import KMeans, MiniBatchKMeans, k_means, kmeans_plusplus +from sklearn.cluster._k_means_common import ( + _euclidean_dense_dense_wrapper, + _euclidean_sparse_dense_wrapper, + _inertia_dense, + _inertia_sparse, + _is_same_clustering, + _relocate_empty_clusters_dense, + _relocate_empty_clusters_sparse, +) +from sklearn.cluster._kmeans import _labels_inertia, 
_mini_batch_step +from sklearn.datasets import make_blobs +from sklearn.exceptions import ConvergenceWarning +from sklearn.metrics import pairwise_distances, pairwise_distances_argmin +from sklearn.metrics.cluster import v_measure_score +from sklearn.metrics.pairwise import euclidean_distances +from sklearn.utils._testing import ( + assert_allclose, + assert_array_equal, + create_memmap_backed_data, +) +from sklearn.utils.extmath import row_norms +from sklearn.utils.fixes import CSR_CONTAINERS, threadpool_limits + +# non centered, sparse centers to check the +centers = np.array( + [ + [0.0, 5.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 4.0, 0.0, 0.0], + [1.0, 0.0, 0.0, 5.0, 1.0], + ] +) +n_samples = 100 +n_clusters, n_features = centers.shape +X, true_labels = make_blobs( + n_samples=n_samples, centers=centers, cluster_std=1.0, random_state=42 +) +X_as_any_csr = [container(X) for container in CSR_CONTAINERS] +data_containers = [np.array] + CSR_CONTAINERS +data_containers_ids = ( + ["dense", "sparse_matrix", "sparse_array"] + if len(X_as_any_csr) == 2 + else ["dense", "sparse_matrix"] +) + + +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +@pytest.mark.parametrize("algo", ["lloyd", "elkan"]) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_kmeans_results(array_constr, algo, dtype): + # Checks that KMeans works as intended on toy dataset by comparing with + # expected results computed by hand. + X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]], dtype=dtype) + sample_weight = [3, 1, 1, 3] + init_centers = np.array([[0, 0], [1, 1]], dtype=dtype) + + expected_labels = [0, 0, 1, 1] + expected_inertia = 0.375 + expected_centers = np.array([[0.125, 0], [0.875, 1]], dtype=dtype) + expected_n_iter = 2 + + kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo) + kmeans.fit(X, sample_weight=sample_weight) + + assert_array_equal(kmeans.labels_, expected_labels) + assert_allclose(kmeans.inertia_, expected_inertia) + assert_allclose(kmeans.cluster_centers_, expected_centers) + assert kmeans.n_iter_ == expected_n_iter + + +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +@pytest.mark.parametrize("algo", ["lloyd", "elkan"]) +def test_kmeans_relocated_clusters(array_constr, algo): + # check that empty clusters are relocated as expected + X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]]) + + # second center too far from others points will be empty at first iter + init_centers = np.array([[0.5, 0.5], [3, 3]]) + + kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo) + kmeans.fit(X) + + expected_n_iter = 3 + expected_inertia = 0.25 + assert_allclose(kmeans.inertia_, expected_inertia) + assert kmeans.n_iter_ == expected_n_iter + + # There are two acceptable ways of relocating clusters in this example, the output + # depends on how the argpartition strategy breaks ties. We accept both outputs. 
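The `try`/`except` that follows accepts either of two symmetric relocations, because the tie in the relocation step can be broken either way without changing the quality of the solution. A quick standalone check (not part of the test) that both accepted answers really do have the same inertia as the expected 0.25:

```python
import numpy as np

X = np.array([[0, 0], [0.5, 0], [0.5, 1], [1, 1]])

def inertia(centers, labels):
    # Sum of squared distances from each point to its assigned center.
    return sum(np.sum((x - centers[l]) ** 2) for x, l in zip(X, labels))

first = inertia(np.array([[0.25, 0.0], [0.75, 1.0]]), [0, 0, 1, 1])
second = inertia(np.array([[0.75, 1.0], [0.25, 0.0]]), [1, 1, 0, 0])
assert np.isclose(first, second) and np.isclose(first, 0.25)
```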
+ try: + expected_labels = [0, 0, 1, 1] + expected_centers = [[0.25, 0], [0.75, 1]] + assert_array_equal(kmeans.labels_, expected_labels) + assert_allclose(kmeans.cluster_centers_, expected_centers) + except AssertionError: + expected_labels = [1, 1, 0, 0] + expected_centers = [[0.75, 1.0], [0.25, 0.0]] + assert_array_equal(kmeans.labels_, expected_labels) + assert_allclose(kmeans.cluster_centers_, expected_centers) + + +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +def test_relocate_empty_clusters(array_constr): + # test for the _relocate_empty_clusters_(dense/sparse) helpers + + # Synthetic dataset with 3 obvious clusters of different sizes + X = np.array([-10.0, -9.5, -9, -8.5, -8, -1, 1, 9, 9.5, 10]).reshape(-1, 1) + X = array_constr(X) + sample_weight = np.ones(10) + + # centers all initialized to the first point of X + centers_old = np.array([-10.0, -10, -10]).reshape(-1, 1) + + # With this initialization, all points will be assigned to the first center + # At this point a center in centers_new is the weighted sum of the points + # it contains if it's not empty, otherwise it is the same as before. + centers_new = np.array([-16.5, -10, -10]).reshape(-1, 1) + weight_in_clusters = np.array([10.0, 0, 0]) + labels = np.zeros(10, dtype=np.int32) + + if array_constr is np.array: + _relocate_empty_clusters_dense( + X, sample_weight, centers_old, centers_new, weight_in_clusters, labels + ) + else: + _relocate_empty_clusters_sparse( + X.data, + X.indices, + X.indptr, + sample_weight, + centers_old, + centers_new, + weight_in_clusters, + labels, + ) + + # The relocation scheme will take the 2 points farthest from the center and + # assign them to the 2 empty clusters, i.e. points at 10 and at 9.9. The + # first center will be updated to contain the other 8 points. + assert_array_equal(weight_in_clusters, [8, 1, 1]) + assert_allclose(centers_new, [[-36], [10], [9.5]]) + + +@pytest.mark.parametrize("distribution", ["normal", "blobs"]) +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +@pytest.mark.parametrize("tol", [1e-2, 1e-8, 1e-100, 0]) +def test_kmeans_elkan_results(distribution, array_constr, tol, global_random_seed): + # Check that results are identical between lloyd and elkan algorithms + rnd = np.random.RandomState(global_random_seed) + if distribution == "normal": + X = rnd.normal(size=(5000, 10)) + else: + X, _ = make_blobs(random_state=rnd) + X[X < 0] = 0 + X = array_constr(X) + + km_lloyd = KMeans(n_clusters=5, random_state=global_random_seed, n_init=1, tol=tol) + km_elkan = KMeans( + algorithm="elkan", + n_clusters=5, + random_state=global_random_seed, + n_init=1, + tol=tol, + ) + + km_lloyd.fit(X) + km_elkan.fit(X) + assert_allclose(km_elkan.cluster_centers_, km_lloyd.cluster_centers_) + assert_array_equal(km_elkan.labels_, km_lloyd.labels_) + assert km_elkan.n_iter_ == km_lloyd.n_iter_ + assert km_elkan.inertia_ == pytest.approx(km_lloyd.inertia_, rel=1e-6) + + +@pytest.mark.parametrize("algorithm", ["lloyd", "elkan"]) +def test_kmeans_convergence(algorithm, global_random_seed): + # Check that KMeans stops when convergence is reached when tol=0. 
(#16075) + rnd = np.random.RandomState(global_random_seed) + X = rnd.normal(size=(5000, 10)) + max_iter = 300 + + km = KMeans( + algorithm=algorithm, + n_clusters=5, + random_state=global_random_seed, + n_init=1, + tol=0, + max_iter=max_iter, + ).fit(X) + + assert km.n_iter_ < max_iter + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_predict_sample_weight_deprecation_warning(Estimator): + X = np.random.rand(100, 2) + sample_weight = np.random.uniform(size=100) + kmeans = Estimator() + kmeans.fit(X, sample_weight=sample_weight) + warn_msg = ( + "'sample_weight' was deprecated in version 1.3 and will be removed in 1.5." + ) + with pytest.warns(FutureWarning, match=warn_msg): + kmeans.predict(X, sample_weight=sample_weight) + + +@pytest.mark.parametrize("X_csr", X_as_any_csr) +def test_minibatch_update_consistency(X_csr, global_random_seed): + # Check that dense and sparse minibatch update give the same results + rng = np.random.RandomState(global_random_seed) + + centers_old = centers + rng.normal(size=centers.shape) + centers_old_csr = centers_old.copy() + + centers_new = np.zeros_like(centers_old) + centers_new_csr = np.zeros_like(centers_old_csr) + + weight_sums = np.zeros(centers_old.shape[0], dtype=X.dtype) + weight_sums_csr = np.zeros(centers_old.shape[0], dtype=X.dtype) + + sample_weight = np.ones(X.shape[0], dtype=X.dtype) + + # extract a small minibatch + X_mb = X[:10] + X_mb_csr = X_csr[:10] + sample_weight_mb = sample_weight[:10] + + # step 1: compute the dense minibatch update + old_inertia = _mini_batch_step( + X_mb, + sample_weight_mb, + centers_old, + centers_new, + weight_sums, + np.random.RandomState(global_random_seed), + random_reassign=False, + ) + assert old_inertia > 0.0 + + # compute the new inertia on the same batch to check that it decreased + labels, new_inertia = _labels_inertia(X_mb, sample_weight_mb, centers_new) + assert new_inertia > 0.0 + assert new_inertia < old_inertia + + # step 2: compute the sparse minibatch update + old_inertia_csr = _mini_batch_step( + X_mb_csr, + sample_weight_mb, + centers_old_csr, + centers_new_csr, + weight_sums_csr, + np.random.RandomState(global_random_seed), + random_reassign=False, + ) + assert old_inertia_csr > 0.0 + + # compute the new inertia on the same batch to check that it decreased + labels_csr, new_inertia_csr = _labels_inertia( + X_mb_csr, sample_weight_mb, centers_new_csr + ) + assert new_inertia_csr > 0.0 + assert new_inertia_csr < old_inertia_csr + + # step 3: check that sparse and dense updates lead to the same results + assert_array_equal(labels, labels_csr) + assert_allclose(centers_new, centers_new_csr) + assert_allclose(old_inertia, old_inertia_csr) + assert_allclose(new_inertia, new_inertia_csr) + + +def _check_fitted_model(km): + # check that the number of clusters centers and distinct labels match + # the expectation + centers = km.cluster_centers_ + assert centers.shape == (n_clusters, n_features) + + labels = km.labels_ + assert np.unique(labels).shape[0] == n_clusters + + # check that the labels assignment are perfect (up to a permutation) + assert_allclose(v_measure_score(true_labels, labels), 1.0) + assert km.inertia_ > 0.0 + + +@pytest.mark.parametrize( + "input_data", + [X] + X_as_any_csr, + ids=data_containers_ids, +) +@pytest.mark.parametrize( + "init", + ["random", "k-means++", centers, lambda X, k, random_state: centers], + ids=["random", "k-means++", "ndarray", "callable"], +) +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def 
test_all_init(Estimator, input_data, init): + # Check KMeans and MiniBatchKMeans with all possible init. + n_init = 10 if isinstance(init, str) else 1 + km = Estimator( + init=init, n_clusters=n_clusters, random_state=42, n_init=n_init + ).fit(input_data) + _check_fitted_model(km) + + +@pytest.mark.parametrize( + "init", + ["random", "k-means++", centers, lambda X, k, random_state: centers], + ids=["random", "k-means++", "ndarray", "callable"], +) +def test_minibatch_kmeans_partial_fit_init(init): + # Check MiniBatchKMeans init with partial_fit + n_init = 10 if isinstance(init, str) else 1 + km = MiniBatchKMeans( + init=init, n_clusters=n_clusters, random_state=0, n_init=n_init + ) + for i in range(100): + # "random" init requires many batches to recover the true labels. + km.partial_fit(X) + _check_fitted_model(km) + + +@pytest.mark.parametrize( + "init, expected_n_init", + [ + ("k-means++", 1), + ("random", "default"), + ( + lambda X, n_clusters, random_state: random_state.uniform( + size=(n_clusters, X.shape[1]) + ), + "default", + ), + ("array-like", 1), + ], +) +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_kmeans_init_auto_with_initial_centroids(Estimator, init, expected_n_init): + """Check that `n_init="auto"` chooses the right number of initializations. + Non-regression test for #26657: + https://github.com/scikit-learn/scikit-learn/pull/26657 + """ + n_sample, n_features, n_clusters = 100, 10, 5 + X = np.random.randn(n_sample, n_features) + if init == "array-like": + init = np.random.randn(n_clusters, n_features) + if expected_n_init == "default": + expected_n_init = 3 if Estimator is MiniBatchKMeans else 10 + + kmeans = Estimator(n_clusters=n_clusters, init=init, n_init="auto").fit(X) + assert kmeans._n_init == expected_n_init + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_fortran_aligned_data(Estimator, global_random_seed): + # Check that KMeans works with fortran-aligned data. + X_fortran = np.asfortranarray(X) + centers_fortran = np.asfortranarray(centers) + + km_c = Estimator( + n_clusters=n_clusters, init=centers, n_init=1, random_state=global_random_seed + ).fit(X) + km_f = Estimator( + n_clusters=n_clusters, + init=centers_fortran, + n_init=1, + random_state=global_random_seed, + ).fit(X_fortran) + assert_allclose(km_c.cluster_centers_, km_f.cluster_centers_) + assert_array_equal(km_c.labels_, km_f.labels_) + + +def test_minibatch_kmeans_verbose(): + # Check verbose mode of MiniBatchKMeans for better coverage. + km = MiniBatchKMeans(n_clusters=n_clusters, random_state=42, verbose=1) + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + km.fit(X) + finally: + sys.stdout = old_stdout + + +@pytest.mark.parametrize("algorithm", ["lloyd", "elkan"]) +@pytest.mark.parametrize("tol", [1e-2, 0]) +def test_kmeans_verbose(algorithm, tol, capsys): + # Check verbose mode of KMeans for better coverage. 
+ X = np.random.RandomState(0).normal(size=(5000, 10)) + + KMeans( + algorithm=algorithm, + n_clusters=n_clusters, + random_state=42, + init="random", + n_init=1, + tol=tol, + verbose=1, + ).fit(X) + + captured = capsys.readouterr() + + assert re.search(r"Initialization complete", captured.out) + assert re.search(r"Iteration [0-9]+, inertia", captured.out) + + if tol == 0: + assert re.search(r"strict convergence", captured.out) + else: + assert re.search(r"center shift .* within tolerance", captured.out) + + +def test_minibatch_kmeans_warning_init_size(): + # Check that a warning is raised when init_size is smaller than n_clusters + with pytest.warns( + RuntimeWarning, match=r"init_size.* should be larger than n_clusters" + ): + MiniBatchKMeans(init_size=10, n_clusters=20).fit(X) + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_warning_n_init_precomputed_centers(Estimator): + # Check that a warning is raised when n_init > 1 and an array is passed for + # the init parameter. + with pytest.warns( + RuntimeWarning, + match="Explicit initial center position passed: performing only one init", + ): + Estimator(init=centers, n_clusters=n_clusters, n_init=10).fit(X) + + +def test_minibatch_sensible_reassign(global_random_seed): + # check that identical initial clusters are reassigned + # also a regression test for when there are more desired reassignments than + # samples. + zeroed_X, true_labels = make_blobs( + n_samples=100, centers=5, random_state=global_random_seed + ) + zeroed_X[::2, :] = 0 + + km = MiniBatchKMeans( + n_clusters=20, batch_size=10, random_state=global_random_seed, init="random" + ).fit(zeroed_X) + # there should not be too many exact zero cluster centers + assert km.cluster_centers_.any(axis=1).sum() > 10 + + # do the same with batch-size > X.shape[0] (regression test) + km = MiniBatchKMeans( + n_clusters=20, batch_size=200, random_state=global_random_seed, init="random" + ).fit(zeroed_X) + # there should not be too many exact zero cluster centers + assert km.cluster_centers_.any(axis=1).sum() > 10 + + # do the same with partial_fit API + km = MiniBatchKMeans(n_clusters=20, random_state=global_random_seed, init="random") + for i in range(100): + km.partial_fit(zeroed_X) + # there should not be too many exact zero cluster centers + assert km.cluster_centers_.any(axis=1).sum() > 10 + + +@pytest.mark.parametrize( + "input_data", + [X] + X_as_any_csr, + ids=data_containers_ids, +) +def test_minibatch_reassign(input_data, global_random_seed): + # Check the reassignment part of the minibatch step with very high or very + # low reassignment ratio. + perfect_centers = np.empty((n_clusters, n_features)) + for i in range(n_clusters): + perfect_centers[i] = X[true_labels == i].mean(axis=0) + + sample_weight = np.ones(n_samples) + centers_new = np.empty_like(perfect_centers) + + # Give a perfect initialization, but a large reassignment_ratio, as a + # result many centers should be reassigned and the model should no longer + # be good + score_before = -_labels_inertia(input_data, sample_weight, perfect_centers, 1)[1] + + _mini_batch_step( + input_data, + sample_weight, + perfect_centers, + centers_new, + np.zeros(n_clusters), + np.random.RandomState(global_random_seed), + random_reassign=True, + reassignment_ratio=1, + ) + + score_after = -_labels_inertia(input_data, sample_weight, centers_new, 1)[1] + + assert score_before > score_after + + # Give a perfect initialization, with a small reassignment_ratio, + # no center should be reassigned. 
+ _mini_batch_step( + input_data, + sample_weight, + perfect_centers, + centers_new, + np.zeros(n_clusters), + np.random.RandomState(global_random_seed), + random_reassign=True, + reassignment_ratio=1e-15, + ) + + assert_allclose(centers_new, perfect_centers) + + +def test_minibatch_with_many_reassignments(): + # Test for the case that the number of clusters to reassign is bigger + # than the batch_size. Run the test with 100 clusters and a batch_size of + # 10 because it turned out that these values ensure that the number of + # clusters to reassign is always bigger than the batch_size. + MiniBatchKMeans( + n_clusters=100, + batch_size=10, + init_size=n_samples, + random_state=42, + verbose=True, + ).fit(X) + + +def test_minibatch_kmeans_init_size(): + # Check the internal _init_size attribute of MiniBatchKMeans + + # default init size should be 3 * batch_size + km = MiniBatchKMeans(n_clusters=10, batch_size=5, n_init=1).fit(X) + assert km._init_size == 15 + + # if 3 * batch size < n_clusters, it should then be 3 * n_clusters + km = MiniBatchKMeans(n_clusters=10, batch_size=1, n_init=1).fit(X) + assert km._init_size == 30 + + # it should not be larger than n_samples + km = MiniBatchKMeans( + n_clusters=10, batch_size=5, n_init=1, init_size=n_samples + 1 + ).fit(X) + assert km._init_size == n_samples + + +@pytest.mark.parametrize("tol, max_no_improvement", [(1e-4, None), (0, 10)]) +def test_minibatch_declared_convergence(capsys, tol, max_no_improvement): + # Check convergence detection based on ewa batch inertia or on + # small center change. + X, _, centers = make_blobs(centers=3, random_state=0, return_centers=True) + + km = MiniBatchKMeans( + n_clusters=3, + init=centers, + batch_size=20, + tol=tol, + random_state=0, + max_iter=10, + n_init=1, + verbose=1, + max_no_improvement=max_no_improvement, + ) + + km.fit(X) + assert 1 < km.n_iter_ < 10 + + captured = capsys.readouterr() + if max_no_improvement is None: + assert "Converged (small centers change)" in captured.out + if tol == 0: + assert "Converged (lack of improvement in inertia)" in captured.out + + +def test_minibatch_iter_steps(): + # Check consistency of n_iter_ and n_steps_ attributes. + batch_size = 30 + n_samples = X.shape[0] + km = MiniBatchKMeans(n_clusters=3, batch_size=batch_size, random_state=0).fit(X) + + # n_iter_ is the number of started epochs + assert km.n_iter_ == np.ceil((km.n_steps_ * batch_size) / n_samples) + assert isinstance(km.n_iter_, int) + + # without stopping condition, max_iter should be reached + km = MiniBatchKMeans( + n_clusters=3, + batch_size=batch_size, + random_state=0, + tol=0, + max_no_improvement=None, + max_iter=10, + ).fit(X) + + assert km.n_iter_ == 10 + assert km.n_steps_ == (10 * n_samples) // batch_size + assert isinstance(km.n_steps_, int) + + +def test_kmeans_copyx(): + # Check that copy_x=False returns nearly equal X after de-centering. 
+ my_X = X.copy() + km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42) + km.fit(my_X) + _check_fitted_model(km) + + # check that my_X is de-centered + assert_allclose(my_X, X) + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_score_max_iter(Estimator, global_random_seed): + # Check that fitting KMeans or MiniBatchKMeans with more iterations gives + # better score + X = np.random.RandomState(global_random_seed).randn(100, 10) + + km1 = Estimator(n_init=1, random_state=global_random_seed, max_iter=1) + s1 = km1.fit(X).score(X) + km2 = Estimator(n_init=1, random_state=global_random_seed, max_iter=10) + s2 = km2.fit(X).score(X) + assert s2 > s1 + + +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +@pytest.mark.parametrize( + "Estimator, algorithm", + [(KMeans, "lloyd"), (KMeans, "elkan"), (MiniBatchKMeans, None)], +) +@pytest.mark.parametrize("max_iter", [2, 100]) +def test_kmeans_predict( + Estimator, algorithm, array_constr, max_iter, global_dtype, global_random_seed +): + # Check the predict method and the equivalence between fit.predict and + # fit_predict. + X, _ = make_blobs( + n_samples=200, n_features=10, centers=10, random_state=global_random_seed + ) + X = array_constr(X, dtype=global_dtype) + + km = Estimator( + n_clusters=10, + init="random", + n_init=10, + max_iter=max_iter, + random_state=global_random_seed, + ) + if algorithm is not None: + km.set_params(algorithm=algorithm) + km.fit(X) + labels = km.labels_ + + # re-predict labels for training set using predict + pred = km.predict(X) + assert_array_equal(pred, labels) + + # re-predict labels for training set using fit_predict + pred = km.fit_predict(X) + assert_array_equal(pred, labels) + + # predict centroid labels + pred = km.predict(km.cluster_centers_) + assert_array_equal(pred, np.arange(10)) + + +@pytest.mark.parametrize("X_csr", X_as_any_csr) +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_dense_sparse(Estimator, X_csr, global_random_seed): + # Check that the results are the same for dense and sparse input. + sample_weight = np.random.RandomState(global_random_seed).random_sample( + (n_samples,) + ) + km_dense = Estimator( + n_clusters=n_clusters, random_state=global_random_seed, n_init=1 + ) + km_dense.fit(X, sample_weight=sample_weight) + km_sparse = Estimator( + n_clusters=n_clusters, random_state=global_random_seed, n_init=1 + ) + km_sparse.fit(X_csr, sample_weight=sample_weight) + + assert_array_equal(km_dense.labels_, km_sparse.labels_) + assert_allclose(km_dense.cluster_centers_, km_sparse.cluster_centers_) + + +@pytest.mark.parametrize("X_csr", X_as_any_csr) +@pytest.mark.parametrize( + "init", ["random", "k-means++", centers], ids=["random", "k-means++", "ndarray"] +) +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_predict_dense_sparse(Estimator, init, X_csr): + # check that models trained on sparse input also works for dense input at + # predict time and vice versa. 
+ n_init = 10 if isinstance(init, str) else 1 + km = Estimator(n_clusters=n_clusters, init=init, n_init=n_init, random_state=0) + + km.fit(X_csr) + assert_array_equal(km.predict(X), km.labels_) + + km.fit(X) + assert_array_equal(km.predict(X_csr), km.labels_) + + +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +@pytest.mark.parametrize("dtype", [np.int32, np.int64]) +@pytest.mark.parametrize("init", ["k-means++", "ndarray"]) +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_integer_input(Estimator, array_constr, dtype, init, global_random_seed): + # Check that KMeans and MiniBatchKMeans work with integer input. + X_dense = np.array([[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]) + X = array_constr(X_dense, dtype=dtype) + + n_init = 1 if init == "ndarray" else 10 + init = X_dense[:2] if init == "ndarray" else init + + km = Estimator( + n_clusters=2, init=init, n_init=n_init, random_state=global_random_seed + ) + if Estimator is MiniBatchKMeans: + km.set_params(batch_size=2) + + km.fit(X) + + # Internally integer input should be converted to float64 + assert km.cluster_centers_.dtype == np.float64 + + expected_labels = [0, 1, 1, 0, 0, 1] + assert_allclose(v_measure_score(km.labels_, expected_labels), 1.0) + + # Same with partial_fit (#14314) + if Estimator is MiniBatchKMeans: + km = clone(km).partial_fit(X) + assert km.cluster_centers_.dtype == np.float64 + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_transform(Estimator, global_random_seed): + # Check the transform method + km = Estimator(n_clusters=n_clusters, random_state=global_random_seed).fit(X) + + # Transorfming cluster_centers_ should return the pairwise distances + # between centers + Xt = km.transform(km.cluster_centers_) + assert_allclose(Xt, pairwise_distances(km.cluster_centers_)) + # In particular, diagonal must be 0 + assert_array_equal(Xt.diagonal(), np.zeros(n_clusters)) + + # Transorfming X should return the pairwise distances between X and the + # centers + Xt = km.transform(X) + assert_allclose(Xt, pairwise_distances(X, km.cluster_centers_)) + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_fit_transform(Estimator, global_random_seed): + # Check equivalence between fit.transform and fit_transform + X1 = Estimator(random_state=global_random_seed, n_init=1).fit(X).transform(X) + X2 = Estimator(random_state=global_random_seed, n_init=1).fit_transform(X) + assert_allclose(X1, X2) + + +def test_n_init(global_random_seed): + # Check that increasing the number of init increases the quality + previous_inertia = np.inf + for n_init in [1, 5, 10]: + # set max_iter=1 to avoid finding the global minimum and get the same + # inertia each time + km = KMeans( + n_clusters=n_clusters, + init="random", + n_init=n_init, + random_state=global_random_seed, + max_iter=1, + ).fit(X) + assert km.inertia_ <= previous_inertia + + +def test_k_means_function(global_random_seed): + # test calling the k_means function directly + cluster_centers, labels, inertia = k_means( + X, n_clusters=n_clusters, sample_weight=None, random_state=global_random_seed + ) + + assert cluster_centers.shape == (n_clusters, n_features) + assert np.unique(labels).shape[0] == n_clusters + + # check that the labels assignment are perfect (up to a permutation) + assert_allclose(v_measure_score(true_labels, labels), 1.0) + assert inertia > 0.0 + + +@pytest.mark.parametrize( + "input_data", + [X] + X_as_any_csr, + ids=data_containers_ids, +) 
+@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_float_precision(Estimator, input_data, global_random_seed): + # Check that the results are the same for single and double precision. + km = Estimator(n_init=1, random_state=global_random_seed) + + inertia = {} + Xt = {} + centers = {} + labels = {} + + for dtype in [np.float64, np.float32]: + X = input_data.astype(dtype, copy=False) + km.fit(X) + + inertia[dtype] = km.inertia_ + Xt[dtype] = km.transform(X) + centers[dtype] = km.cluster_centers_ + labels[dtype] = km.labels_ + + # dtype of cluster centers has to be the dtype of the input data + assert km.cluster_centers_.dtype == dtype + + # same with partial_fit + if Estimator is MiniBatchKMeans: + km.partial_fit(X[0:3]) + assert km.cluster_centers_.dtype == dtype + + # compare arrays with low precision since the difference between 32 and + # 64 bit comes from an accumulation of rounding errors. + assert_allclose(inertia[np.float32], inertia[np.float64], rtol=1e-4) + assert_allclose(Xt[np.float32], Xt[np.float64], atol=Xt[np.float64].max() * 1e-4) + assert_allclose( + centers[np.float32], centers[np.float64], atol=centers[np.float64].max() * 1e-4 + ) + assert_array_equal(labels[np.float32], labels[np.float64]) + + +@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64]) +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_centers_not_mutated(Estimator, dtype): + # Check that KMeans and MiniBatchKMeans won't mutate the user provided + # init centers silently even if input data and init centers have the same + # type. + X_new_type = X.astype(dtype, copy=False) + centers_new_type = centers.astype(dtype, copy=False) + + km = Estimator(init=centers_new_type, n_clusters=n_clusters, n_init=1) + km.fit(X_new_type) + + assert not np.may_share_memory(km.cluster_centers_, centers_new_type) + + +@pytest.mark.parametrize( + "input_data", + [X] + X_as_any_csr, + ids=data_containers_ids, +) +def test_kmeans_init_fitted_centers(input_data): + # Check that starting fitting from a local optimum shouldn't change the + # solution + km1 = KMeans(n_clusters=n_clusters).fit(input_data) + km2 = KMeans(n_clusters=n_clusters, init=km1.cluster_centers_, n_init=1).fit( + input_data + ) + + assert_allclose(km1.cluster_centers_, km2.cluster_centers_) + + +def test_kmeans_warns_less_centers_than_unique_points(global_random_seed): + # Check KMeans when the number of found clusters is smaller than expected + X = np.asarray([[0, 0], [0, 1], [1, 0], [1, 0]]) # last point is duplicated + km = KMeans(n_clusters=4, random_state=global_random_seed) + + # KMeans should warn that fewer labels than cluster centers have been used + msg = ( + r"Number of distinct clusters \(3\) found smaller than " + r"n_clusters \(4\). Possibly due to duplicate points in X." + ) + with pytest.warns(ConvergenceWarning, match=msg): + km.fit(X) + # only three distinct points, so only three clusters + # can have points assigned to them + assert set(km.labels_) == set(range(3)) + + +def _sort_centers(centers): + return np.sort(centers, axis=0) + + +def test_weighted_vs_repeated(global_random_seed): + # Check that a sample weight of N should yield the same result as an N-fold + # repetition of the sample. Valid only if init is precomputed, otherwise + # rng produces different results. Not valid for MinibatchKMeans due to rng + # to extract minibatches. 
+ sample_weight = np.random.RandomState(global_random_seed).randint( + 1, 5, size=n_samples + ) + X_repeat = np.repeat(X, sample_weight, axis=0) + + km = KMeans( + init=centers, n_init=1, n_clusters=n_clusters, random_state=global_random_seed + ) + + km_weighted = clone(km).fit(X, sample_weight=sample_weight) + repeated_labels = np.repeat(km_weighted.labels_, sample_weight) + km_repeated = clone(km).fit(X_repeat) + + assert_array_equal(km_repeated.labels_, repeated_labels) + assert_allclose(km_weighted.inertia_, km_repeated.inertia_) + assert_allclose( + _sort_centers(km_weighted.cluster_centers_), + _sort_centers(km_repeated.cluster_centers_), + ) + + +@pytest.mark.parametrize( + "input_data", + [X] + X_as_any_csr, + ids=data_containers_ids, +) +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_unit_weights_vs_no_weights(Estimator, input_data, global_random_seed): + # Check that not passing sample weights should be equivalent to passing + # sample weights all equal to one. + sample_weight = np.ones(n_samples) + + km = Estimator(n_clusters=n_clusters, random_state=global_random_seed, n_init=1) + km_none = clone(km).fit(input_data, sample_weight=None) + km_ones = clone(km).fit(input_data, sample_weight=sample_weight) + + assert_array_equal(km_none.labels_, km_ones.labels_) + assert_allclose(km_none.cluster_centers_, km_ones.cluster_centers_) + + +@pytest.mark.parametrize( + "input_data", + [X] + X_as_any_csr, + ids=data_containers_ids, +) +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_scaled_weights(Estimator, input_data, global_random_seed): + # Check that scaling all sample weights by a common factor + # shouldn't change the result + sample_weight = np.random.RandomState(global_random_seed).uniform(size=n_samples) + + km = Estimator(n_clusters=n_clusters, random_state=global_random_seed, n_init=1) + km_orig = clone(km).fit(input_data, sample_weight=sample_weight) + km_scaled = clone(km).fit(input_data, sample_weight=0.5 * sample_weight) + + assert_array_equal(km_orig.labels_, km_scaled.labels_) + assert_allclose(km_orig.cluster_centers_, km_scaled.cluster_centers_) + + +def test_kmeans_elkan_iter_attribute(): + # Regression test on bad n_iter_ value. Previous bug n_iter_ was one off + # it's right value (#11340). + km = KMeans(algorithm="elkan", max_iter=1).fit(X) + assert km.n_iter_ == 1 + + +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +def test_kmeans_empty_cluster_relocated(array_constr): + # check that empty clusters are correctly relocated when using sample + # weights (#13486) + X = array_constr([[-1], [1]]) + sample_weight = [1.9, 0.1] + init = np.array([[-1], [10]]) + + km = KMeans(n_clusters=2, init=init, n_init=1) + km.fit(X, sample_weight=sample_weight) + + assert len(set(km.labels_)) == 2 + assert_allclose(km.cluster_centers_, [[-1], [1]]) + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_result_equal_in_diff_n_threads(Estimator, global_random_seed): + # Check that KMeans/MiniBatchKMeans give the same results in parallel mode + # than in sequential mode. 
+ rnd = np.random.RandomState(global_random_seed) + X = rnd.normal(size=(50, 10)) + + with threadpool_limits(limits=1, user_api="openmp"): + result_1 = ( + Estimator(n_clusters=n_clusters, random_state=global_random_seed) + .fit(X) + .labels_ + ) + with threadpool_limits(limits=2, user_api="openmp"): + result_2 = ( + Estimator(n_clusters=n_clusters, random_state=global_random_seed) + .fit(X) + .labels_ + ) + assert_array_equal(result_1, result_2) + + +def test_warning_elkan_1_cluster(): + # Check warning messages specific to KMeans + with pytest.warns( + RuntimeWarning, + match="algorithm='elkan' doesn't make sense for a single cluster", + ): + KMeans(n_clusters=1, algorithm="elkan").fit(X) + + +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +@pytest.mark.parametrize("algo", ["lloyd", "elkan"]) +def test_k_means_1_iteration(array_constr, algo, global_random_seed): + # check the results after a single iteration (E-step M-step E-step) by + # comparing against a pure python implementation. + X = np.random.RandomState(global_random_seed).uniform(size=(100, 5)) + init_centers = X[:5] + X = array_constr(X) + + def py_kmeans(X, init): + new_centers = init.copy() + labels = pairwise_distances_argmin(X, init) + for label in range(init.shape[0]): + new_centers[label] = X[labels == label].mean(axis=0) + labels = pairwise_distances_argmin(X, new_centers) + return labels, new_centers + + py_labels, py_centers = py_kmeans(X, init_centers) + + cy_kmeans = KMeans( + n_clusters=5, n_init=1, init=init_centers, algorithm=algo, max_iter=1 + ).fit(X) + cy_labels = cy_kmeans.labels_ + cy_centers = cy_kmeans.cluster_centers_ + + assert_array_equal(py_labels, cy_labels) + assert_allclose(py_centers, cy_centers) + + +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +@pytest.mark.parametrize("squared", [True, False]) +def test_euclidean_distance(dtype, squared, global_random_seed): + # Check that the _euclidean_(dense/sparse)_dense helpers produce correct + # results + rng = np.random.RandomState(global_random_seed) + a_sparse = sp.random( + 1, 100, density=0.5, format="csr", random_state=rng, dtype=dtype + ) + a_dense = a_sparse.toarray().reshape(-1) + b = rng.randn(100).astype(dtype, copy=False) + b_squared_norm = (b**2).sum() + + expected = ((a_dense - b) ** 2).sum() + expected = expected if squared else np.sqrt(expected) + + distance_dense_dense = _euclidean_dense_dense_wrapper(a_dense, b, squared) + distance_sparse_dense = _euclidean_sparse_dense_wrapper( + a_sparse.data, a_sparse.indices, b, b_squared_norm, squared + ) + + rtol = 1e-4 if dtype == np.float32 else 1e-7 + assert_allclose(distance_dense_dense, distance_sparse_dense, rtol=rtol) + assert_allclose(distance_dense_dense, expected, rtol=rtol) + assert_allclose(distance_sparse_dense, expected, rtol=rtol) + + +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_inertia(dtype, global_random_seed): + # Check that the _inertia_(dense/sparse) helpers produce correct results. 
+ rng = np.random.RandomState(global_random_seed) + X_sparse = sp.random( + 100, 10, density=0.5, format="csr", random_state=rng, dtype=dtype + ) + X_dense = X_sparse.toarray() + sample_weight = rng.randn(100).astype(dtype, copy=False) + centers = rng.randn(5, 10).astype(dtype, copy=False) + labels = rng.randint(5, size=100, dtype=np.int32) + + distances = ((X_dense - centers[labels]) ** 2).sum(axis=1) + expected = np.sum(distances * sample_weight) + + inertia_dense = _inertia_dense(X_dense, sample_weight, centers, labels, n_threads=1) + inertia_sparse = _inertia_sparse( + X_sparse, sample_weight, centers, labels, n_threads=1 + ) + + rtol = 1e-4 if dtype == np.float32 else 1e-6 + assert_allclose(inertia_dense, inertia_sparse, rtol=rtol) + assert_allclose(inertia_dense, expected, rtol=rtol) + assert_allclose(inertia_sparse, expected, rtol=rtol) + + # Check the single_label parameter. + label = 1 + mask = labels == label + distances = ((X_dense[mask] - centers[label]) ** 2).sum(axis=1) + expected = np.sum(distances * sample_weight[mask]) + + inertia_dense = _inertia_dense( + X_dense, sample_weight, centers, labels, n_threads=1, single_label=label + ) + inertia_sparse = _inertia_sparse( + X_sparse, sample_weight, centers, labels, n_threads=1, single_label=label + ) + + assert_allclose(inertia_dense, inertia_sparse, rtol=rtol) + assert_allclose(inertia_dense, expected, rtol=rtol) + assert_allclose(inertia_sparse, expected, rtol=rtol) + + +@pytest.mark.parametrize("Klass, default_n_init", [(KMeans, 10), (MiniBatchKMeans, 3)]) +def test_n_init_auto(Klass, default_n_init): + est = Klass(n_init="auto", init="k-means++") + est.fit(X) + assert est._n_init == 1 + + est = Klass(n_init="auto", init="random") + est.fit(X) + assert est._n_init == 10 if Klass.__name__ == "KMeans" else 3 + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_sample_weight_unchanged(Estimator): + # Check that sample_weight is not modified in place by KMeans (#17204) + X = np.array([[1], [2], [4]]) + sample_weight = np.array([0.5, 0.2, 0.3]) + Estimator(n_clusters=2, random_state=0).fit(X, sample_weight=sample_weight) + + assert_array_equal(sample_weight, np.array([0.5, 0.2, 0.3])) + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +@pytest.mark.parametrize( + "param, match", + [ + ({"n_clusters": n_samples + 1}, r"n_samples.* should be >= n_clusters"), + ( + {"init": X[:2]}, + r"The shape of the initial centers .* does not match " + r"the number of clusters", + ), + ( + {"init": lambda X_, k, random_state: X_[:2]}, + r"The shape of the initial centers .* does not match " + r"the number of clusters", + ), + ( + {"init": X[:8, :2]}, + r"The shape of the initial centers .* does not match " + r"the number of features of the data", + ), + ( + {"init": lambda X_, k, random_state: X_[:8, :2]}, + r"The shape of the initial centers .* does not match " + r"the number of features of the data", + ), + ], +) +def test_wrong_params(Estimator, param, match): + # Check that error are raised with clear error message when wrong values + # are passed for the parameters + # Set n_init=1 by default to avoid warning with precomputed init + km = Estimator(n_init=1) + with pytest.raises(ValueError, match=match): + km.set_params(**param).fit(X) + + +@pytest.mark.parametrize( + "param, match", + [ + ( + {"x_squared_norms": X[:2]}, + r"The length of x_squared_norms .* should " + r"be equal to the length of n_samples", + ), + ], +) +def test_kmeans_plusplus_wrong_params(param, match): + with 
pytest.raises(ValueError, match=match): + kmeans_plusplus(X, n_clusters, **param) + + +@pytest.mark.parametrize( + "input_data", + [X] + X_as_any_csr, +) +@pytest.mark.parametrize("dtype", [np.float64, np.float32]) +def test_kmeans_plusplus_output(input_data, dtype, global_random_seed): + # Check for the correct number of seeds and all positive values + data = input_data.astype(dtype) + centers, indices = kmeans_plusplus( + data, n_clusters, random_state=global_random_seed + ) + + # Check there are the correct number of indices and that all indices are + # positive and within the number of samples + assert indices.shape[0] == n_clusters + assert (indices >= 0).all() + assert (indices <= data.shape[0]).all() + + # Check for the correct number of seeds and that they are bound by the data + assert centers.shape[0] == n_clusters + assert (centers.max(axis=0) <= data.max(axis=0)).all() + assert (centers.min(axis=0) >= data.min(axis=0)).all() + + # Check that indices correspond to reported centers + # Use X for comparison rather than data, test still works against centers + # calculated with sparse data. + assert_allclose(X[indices].astype(dtype), centers) + + +@pytest.mark.parametrize("x_squared_norms", [row_norms(X, squared=True), None]) +def test_kmeans_plusplus_norms(x_squared_norms): + # Check that defining x_squared_norms returns the same as default=None. + centers, indices = kmeans_plusplus(X, n_clusters, x_squared_norms=x_squared_norms) + + assert_allclose(X[indices], centers) + + +def test_kmeans_plusplus_dataorder(global_random_seed): + # Check that memory layout does not effect result + centers_c, _ = kmeans_plusplus(X, n_clusters, random_state=global_random_seed) + + X_fortran = np.asfortranarray(X) + + centers_fortran, _ = kmeans_plusplus( + X_fortran, n_clusters, random_state=global_random_seed + ) + + assert_allclose(centers_c, centers_fortran) + + +def test_is_same_clustering(): + # Sanity check for the _is_same_clustering utility function + labels1 = np.array([1, 0, 0, 1, 2, 0, 2, 1], dtype=np.int32) + assert _is_same_clustering(labels1, labels1, 3) + + # these other labels represent the same clustering since we can retrieve the first + # labels by simply renaming the labels: 0 -> 1, 1 -> 2, 2 -> 0. + labels2 = np.array([0, 2, 2, 0, 1, 2, 1, 0], dtype=np.int32) + assert _is_same_clustering(labels1, labels2, 3) + + # these other labels do not represent the same clustering since not all ones are + # mapped to a same value + labels3 = np.array([1, 0, 0, 2, 2, 0, 2, 1], dtype=np.int32) + assert not _is_same_clustering(labels1, labels3, 3) + + +@pytest.mark.parametrize( + "kwargs", ({"init": np.str_("k-means++")}, {"init": [[0, 0], [1, 1]], "n_init": 1}) +) +def test_kmeans_with_array_like_or_np_scalar_init(kwargs): + """Check that init works with numpy scalar strings. + + Non-regression test for #21964. 
+ """ + X = np.asarray([[0, 0], [0.5, 0], [0.5, 1], [1, 1]], dtype=np.float64) + + clustering = KMeans(n_clusters=2, **kwargs) + # Does not raise + clustering.fit(X) + + +@pytest.mark.parametrize( + "Klass, method", + [(KMeans, "fit"), (MiniBatchKMeans, "fit"), (MiniBatchKMeans, "partial_fit")], +) +def test_feature_names_out(Klass, method): + """Check `feature_names_out` for `KMeans` and `MiniBatchKMeans`.""" + class_name = Klass.__name__.lower() + kmeans = Klass() + getattr(kmeans, method)(X) + n_clusters = kmeans.cluster_centers_.shape[0] + + names_out = kmeans.get_feature_names_out() + assert_array_equal([f"{class_name}{i}" for i in range(n_clusters)], names_out) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None]) +def test_predict_does_not_change_cluster_centers(csr_container): + """Check that predict does not change cluster centers. + + Non-regression test for gh-24253. + """ + X, _ = make_blobs(n_samples=200, n_features=10, centers=10, random_state=0) + if csr_container is not None: + X = csr_container(X) + + kmeans = KMeans() + y_pred1 = kmeans.fit_predict(X) + # Make cluster_centers readonly + kmeans.cluster_centers_ = create_memmap_backed_data(kmeans.cluster_centers_) + kmeans.labels_ = create_memmap_backed_data(kmeans.labels_) + + y_pred2 = kmeans.predict(X) + assert_array_equal(y_pred1, y_pred2) + + +@pytest.mark.parametrize("init", ["k-means++", "random"]) +def test_sample_weight_init(init, global_random_seed): + """Check that sample weight is used during init. + + `_init_centroids` is shared across all classes inheriting from _BaseKMeans so + it's enough to check for KMeans. + """ + rng = np.random.RandomState(global_random_seed) + X, _ = make_blobs( + n_samples=200, n_features=10, centers=10, random_state=global_random_seed + ) + x_squared_norms = row_norms(X, squared=True) + + kmeans = KMeans() + clusters_weighted = kmeans._init_centroids( + X=X, + x_squared_norms=x_squared_norms, + init=init, + sample_weight=rng.uniform(size=X.shape[0]), + n_centroids=5, + random_state=np.random.RandomState(global_random_seed), + ) + clusters = kmeans._init_centroids( + X=X, + x_squared_norms=x_squared_norms, + init=init, + sample_weight=np.ones(X.shape[0]), + n_centroids=5, + random_state=np.random.RandomState(global_random_seed), + ) + with pytest.raises(AssertionError): + assert_allclose(clusters_weighted, clusters) + + +@pytest.mark.parametrize("init", ["k-means++", "random"]) +def test_sample_weight_zero(init, global_random_seed): + """Check that if sample weight is 0, this sample won't be chosen. + + `_init_centroids` is shared across all classes inheriting from _BaseKMeans so + it's enough to check for KMeans. + """ + rng = np.random.RandomState(global_random_seed) + X, _ = make_blobs( + n_samples=100, n_features=5, centers=5, random_state=global_random_seed + ) + sample_weight = rng.uniform(size=X.shape[0]) + sample_weight[::2] = 0 + x_squared_norms = row_norms(X, squared=True) + + kmeans = KMeans() + clusters_weighted = kmeans._init_centroids( + X=X, + x_squared_norms=x_squared_norms, + init=init, + sample_weight=sample_weight, + n_centroids=10, + random_state=np.random.RandomState(global_random_seed), + ) + # No center should be one of the 0 sample weight point + # (i.e. 
be at a distance=0 from it) + d = euclidean_distances(X[::2], clusters_weighted) + assert not np.any(np.isclose(d, 0)) + + +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +@pytest.mark.parametrize("algorithm", ["lloyd", "elkan"]) +def test_relocating_with_duplicates(algorithm, array_constr): + """Check that kmeans stops when there are more centers than non-duplicate samples + + Non-regression test for issue: + https://github.com/scikit-learn/scikit-learn/issues/28055 + """ + X = np.array([[0, 0], [1, 1], [1, 1], [1, 0], [0, 1]]) + km = KMeans(n_clusters=5, init=X, algorithm=algorithm) + + msg = r"Number of distinct clusters \(4\) found smaller than n_clusters \(5\)" + with pytest.warns(ConvergenceWarning, match=msg): + km.fit(array_constr(X)) + + assert km.n_iter_ == 1 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_mean_shift.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_mean_shift.py new file mode 100644 index 0000000000000000000000000000000000000000..265c72d0c4ce1d009f8298e70dea902f2aa5d212 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_mean_shift.py @@ -0,0 +1,206 @@ +""" +Testing for mean shift clustering methods + +""" + +import warnings + +import numpy as np +import pytest + +from sklearn.cluster import MeanShift, estimate_bandwidth, get_bin_seeds, mean_shift +from sklearn.datasets import make_blobs +from sklearn.metrics import v_measure_score +from sklearn.utils._testing import assert_allclose, assert_array_equal + +n_clusters = 3 +centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10 +X, _ = make_blobs( + n_samples=300, + n_features=2, + centers=centers, + cluster_std=0.4, + shuffle=True, + random_state=11, +) + + +def test_estimate_bandwidth(): + # Test estimate_bandwidth + bandwidth = estimate_bandwidth(X, n_samples=200) + assert 0.9 <= bandwidth <= 1.5 + + +def test_estimate_bandwidth_1sample(global_dtype): + # Test estimate_bandwidth when n_samples=1 and quantile<1, so that + # n_neighbors is set to 1. 
+ bandwidth = estimate_bandwidth( + X.astype(global_dtype, copy=False), n_samples=1, quantile=0.3 + ) + + assert bandwidth.dtype == X.dtype + assert bandwidth == pytest.approx(0.0, abs=1e-5) + + +@pytest.mark.parametrize( + "bandwidth, cluster_all, expected, first_cluster_label", + [(1.2, True, 3, 0), (1.2, False, 4, -1)], +) +def test_mean_shift( + global_dtype, bandwidth, cluster_all, expected, first_cluster_label +): + # Test MeanShift algorithm + X_with_global_dtype = X.astype(global_dtype, copy=False) + ms = MeanShift(bandwidth=bandwidth, cluster_all=cluster_all) + labels = ms.fit(X_with_global_dtype).labels_ + labels_unique = np.unique(labels) + n_clusters_ = len(labels_unique) + assert n_clusters_ == expected + assert labels_unique[0] == first_cluster_label + assert ms.cluster_centers_.dtype == global_dtype + + cluster_centers, labels_mean_shift = mean_shift( + X_with_global_dtype, cluster_all=cluster_all + ) + labels_mean_shift_unique = np.unique(labels_mean_shift) + n_clusters_mean_shift = len(labels_mean_shift_unique) + assert n_clusters_mean_shift == expected + assert labels_mean_shift_unique[0] == first_cluster_label + assert cluster_centers.dtype == global_dtype + + +def test_parallel(global_dtype): + centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10 + X, _ = make_blobs( + n_samples=50, + n_features=2, + centers=centers, + cluster_std=0.4, + shuffle=True, + random_state=11, + ) + + X = X.astype(global_dtype, copy=False) + + ms1 = MeanShift(n_jobs=2) + ms1.fit(X) + + ms2 = MeanShift() + ms2.fit(X) + + assert_allclose(ms1.cluster_centers_, ms2.cluster_centers_) + assert ms1.cluster_centers_.dtype == ms2.cluster_centers_.dtype + assert_array_equal(ms1.labels_, ms2.labels_) + + +def test_meanshift_predict(global_dtype): + # Test MeanShift.predict + ms = MeanShift(bandwidth=1.2) + X_with_global_dtype = X.astype(global_dtype, copy=False) + labels = ms.fit_predict(X_with_global_dtype) + labels2 = ms.predict(X_with_global_dtype) + assert_array_equal(labels, labels2) + + +def test_meanshift_all_orphans(): + # init away from the data, crash with a sensible warning + ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]]) + msg = "No point was within bandwidth=0.1" + with pytest.raises(ValueError, match=msg): + ms.fit( + X, + ) + + +def test_unfitted(): + # Non-regression: before fit, there should be not fitted attributes. 
+ ms = MeanShift() + assert not hasattr(ms, "cluster_centers_") + assert not hasattr(ms, "labels_") + + +def test_cluster_intensity_tie(global_dtype): + X = np.array([[1, 1], [2, 1], [1, 0], [4, 7], [3, 5], [3, 6]], dtype=global_dtype) + c1 = MeanShift(bandwidth=2).fit(X) + + X = np.array([[4, 7], [3, 5], [3, 6], [1, 1], [2, 1], [1, 0]], dtype=global_dtype) + c2 = MeanShift(bandwidth=2).fit(X) + assert_array_equal(c1.labels_, [1, 1, 1, 0, 0, 0]) + assert_array_equal(c2.labels_, [0, 0, 0, 1, 1, 1]) + + +def test_bin_seeds(global_dtype): + # Test the bin seeding technique which can be used in the mean shift + # algorithm + # Data is just 6 points in the plane + X = np.array( + [[1.0, 1.0], [1.4, 1.4], [1.8, 1.2], [2.0, 1.0], [2.1, 1.1], [0.0, 0.0]], + dtype=global_dtype, + ) + + # With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be + # found + ground_truth = {(1.0, 1.0), (2.0, 1.0), (0.0, 0.0)} + test_bins = get_bin_seeds(X, 1, 1) + test_result = set(tuple(p) for p in test_bins) + assert len(ground_truth.symmetric_difference(test_result)) == 0 + + # With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be + # found + ground_truth = {(1.0, 1.0), (2.0, 1.0)} + test_bins = get_bin_seeds(X, 1, 2) + test_result = set(tuple(p) for p in test_bins) + assert len(ground_truth.symmetric_difference(test_result)) == 0 + + # With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found + # we bail and use the whole data here. + with warnings.catch_warnings(record=True): + test_bins = get_bin_seeds(X, 0.01, 1) + assert_allclose(test_bins, X) + + # tight clusters around [0, 0] and [1, 1], only get two bins + X, _ = make_blobs( + n_samples=100, + n_features=2, + centers=[[0, 0], [1, 1]], + cluster_std=0.1, + random_state=0, + ) + X = X.astype(global_dtype, copy=False) + test_bins = get_bin_seeds(X, 1) + assert_array_equal(test_bins, [[0, 0], [1, 1]]) + + +@pytest.mark.parametrize("max_iter", [1, 100]) +def test_max_iter(max_iter): + clusters1, _ = mean_shift(X, max_iter=max_iter) + ms = MeanShift(max_iter=max_iter).fit(X) + clusters2 = ms.cluster_centers_ + + assert ms.n_iter_ <= ms.max_iter + assert len(clusters1) == len(clusters2) + + for c1, c2 in zip(clusters1, clusters2): + assert np.allclose(c1, c2) + + +def test_mean_shift_zero_bandwidth(global_dtype): + # Check that mean shift works when the estimated bandwidth is 0. + X = np.array([1, 1, 1, 2, 2, 2, 3, 3], dtype=global_dtype).reshape(-1, 1) + + # estimate_bandwidth with default args returns 0 on this dataset + bandwidth = estimate_bandwidth(X) + assert bandwidth == 0 + + # get_bin_seeds with a 0 bin_size should return the dataset itself + assert get_bin_seeds(X, bin_size=bandwidth) is X + + # MeanShift with binning and a 0 estimated bandwidth should be equivalent + # to no binning. 
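bin_seeding replaces the full sample set with bin centers produced by get_bin_seeds, which discretizes the data on a grid of width bin_size and keeps bins holding at least min_bin_freq points. A small illustrative sketch (data and parameter values are arbitrary, not from this file):

from sklearn.cluster import MeanShift, get_bin_seeds
from sklearn.datasets import make_blobs

X_demo, _ = make_blobs(
    n_samples=100, centers=[[0, 0], [3, 3]], cluster_std=0.3, random_state=0
)
seeds = get_bin_seeds(X_demo, bin_size=1.0, min_bin_freq=1)
ms = MeanShift(bandwidth=1.0, seeds=seeds).fit(X_demo)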
+ ms_binning = MeanShift(bin_seeding=True, bandwidth=None).fit(X) + ms_nobinning = MeanShift(bin_seeding=False).fit(X) + expected_labels = np.array([0, 0, 0, 1, 1, 1, 2, 2]) + + assert v_measure_score(ms_binning.labels_, expected_labels) == pytest.approx(1) + assert v_measure_score(ms_nobinning.labels_, expected_labels) == pytest.approx(1) + assert_allclose(ms_binning.cluster_centers_, ms_nobinning.cluster_centers_) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_optics.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_optics.py new file mode 100644 index 0000000000000000000000000000000000000000..d6e415e114ee1ce42aaee2b07c9500b8b38eaea9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_optics.py @@ -0,0 +1,837 @@ +# Authors: Shane Grigsby +# Adrin Jalali +# License: BSD 3 clause +import warnings + +import numpy as np +import pytest + +from sklearn.cluster import DBSCAN, OPTICS +from sklearn.cluster._optics import _extend_region, _extract_xi_labels +from sklearn.cluster.tests.common import generate_clustered_data +from sklearn.datasets import make_blobs +from sklearn.exceptions import DataConversionWarning, EfficiencyWarning +from sklearn.metrics.cluster import contingency_matrix +from sklearn.metrics.pairwise import pairwise_distances +from sklearn.utils import shuffle +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.fixes import CSR_CONTAINERS + +rng = np.random.RandomState(0) +n_points_per_cluster = 10 +C1 = [-5, -2] + 0.8 * rng.randn(n_points_per_cluster, 2) +C2 = [4, -1] + 0.1 * rng.randn(n_points_per_cluster, 2) +C3 = [1, -2] + 0.2 * rng.randn(n_points_per_cluster, 2) +C4 = [-2, 3] + 0.3 * rng.randn(n_points_per_cluster, 2) +C5 = [3, -2] + 1.6 * rng.randn(n_points_per_cluster, 2) +C6 = [5, 6] + 2 * rng.randn(n_points_per_cluster, 2) +X = np.vstack((C1, C2, C3, C4, C5, C6)) + + +@pytest.mark.parametrize( + ("r_plot", "end"), + [ + [[10, 8.9, 8.8, 8.7, 7, 10], 3], + [[10, 8.9, 8.8, 8.7, 8.6, 7, 10], 0], + [[10, 8.9, 8.8, 8.7, 7, 6, np.inf], 4], + [[10, 8.9, 8.8, 8.7, 7, 6, np.inf], 4], + ], +) +def test_extend_downward(r_plot, end): + r_plot = np.array(r_plot) + ratio = r_plot[:-1] / r_plot[1:] + steep_downward = ratio >= 1 / 0.9 + upward = ratio < 1 + + e = _extend_region(steep_downward, upward, 0, 2) + assert e == end + + +@pytest.mark.parametrize( + ("r_plot", "end"), + [ + [[1, 2, 2.1, 2.2, 4, 8, 8, np.inf], 6], + [[1, 2, 2.1, 2.2, 2.3, 4, 8, 8, np.inf], 0], + [[1, 2, 2.1, 2, np.inf], 0], + [[1, 2, 2.1, np.inf], 2], + ], +) +def test_extend_upward(r_plot, end): + r_plot = np.array(r_plot) + ratio = r_plot[:-1] / r_plot[1:] + steep_upward = ratio <= 0.9 + downward = ratio > 1 + + e = _extend_region(steep_upward, downward, 0, 2) + assert e == end + + +@pytest.mark.parametrize( + ("ordering", "clusters", "expected"), + [ + [[0, 1, 2, 3], [[0, 1], [2, 3]], [0, 0, 1, 1]], + [[0, 1, 2, 3], [[0, 1], [3, 3]], [0, 0, -1, 1]], + [[0, 1, 2, 3], [[0, 1], [3, 3], [0, 3]], [0, 0, -1, 1]], + [[3, 1, 2, 0], [[0, 1], [3, 3], [0, 3]], [1, 0, -1, 0]], + ], +) +def test_the_extract_xi_labels(ordering, clusters, expected): + labels = _extract_xi_labels(ordering, clusters) + + assert_array_equal(labels, expected) + + +def test_extract_xi(global_dtype): + # small and easy test (no clusters around other clusters) + # but with a clear noise data. 
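Both cluster-extraction methods exercised in these OPTICS tests can be driven from a single fit: the xi method produces labels_ directly, while a DBSCAN-style labeling at a fixed eps can be re-extracted afterwards. The following is an illustrative sketch (eps, xi and the demo data are arbitrary, not part of the test file):

from sklearn.cluster import OPTICS, cluster_optics_dbscan
from sklearn.datasets import make_blobs

X_demo, _ = make_blobs(n_samples=200, centers=3, cluster_std=0.4, random_state=0)
clust = OPTICS(min_samples=5, xi=0.05, min_cluster_size=0.05).fit(X_demo)
xi_labels = clust.labels_  # xi-based clustering from the fit itself
# A DBSCAN-like clustering at a chosen eps, without refitting:
db_labels = cluster_optics_dbscan(
    reachability=clust.reachability_,
    core_distances=clust.core_distances_,
    ordering=clust.ordering_,
    eps=0.5,
)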
+ rng = np.random.RandomState(0) + n_points_per_cluster = 5 + + C1 = [-5, -2] + 0.8 * rng.randn(n_points_per_cluster, 2) + C2 = [4, -1] + 0.1 * rng.randn(n_points_per_cluster, 2) + C3 = [1, -2] + 0.2 * rng.randn(n_points_per_cluster, 2) + C4 = [-2, 3] + 0.3 * rng.randn(n_points_per_cluster, 2) + C5 = [3, -2] + 0.6 * rng.randn(n_points_per_cluster, 2) + C6 = [5, 6] + 0.2 * rng.randn(n_points_per_cluster, 2) + + X = np.vstack((C1, C2, C3, C4, C5, np.array([[100, 100]]), C6)).astype( + global_dtype, copy=False + ) + expected_labels = np.r_[[2] * 5, [0] * 5, [1] * 5, [3] * 5, [1] * 5, -1, [4] * 5] + X, expected_labels = shuffle(X, expected_labels, random_state=rng) + + clust = OPTICS( + min_samples=3, min_cluster_size=2, max_eps=20, cluster_method="xi", xi=0.4 + ).fit(X) + assert_array_equal(clust.labels_, expected_labels) + + # check float min_samples and min_cluster_size + clust = OPTICS( + min_samples=0.1, min_cluster_size=0.08, max_eps=20, cluster_method="xi", xi=0.4 + ).fit(X) + assert_array_equal(clust.labels_, expected_labels) + + X = np.vstack((C1, C2, C3, C4, C5, np.array([[100, 100]] * 2), C6)).astype( + global_dtype, copy=False + ) + expected_labels = np.r_[ + [1] * 5, [3] * 5, [2] * 5, [0] * 5, [2] * 5, -1, -1, [4] * 5 + ] + X, expected_labels = shuffle(X, expected_labels, random_state=rng) + + clust = OPTICS( + min_samples=3, min_cluster_size=3, max_eps=20, cluster_method="xi", xi=0.3 + ).fit(X) + # this may fail if the predecessor correction is not at work! + assert_array_equal(clust.labels_, expected_labels) + + C1 = [[0, 0], [0, 0.1], [0, -0.1], [0.1, 0]] + C2 = [[10, 10], [10, 9], [10, 11], [9, 10]] + C3 = [[100, 100], [100, 90], [100, 110], [90, 100]] + X = np.vstack((C1, C2, C3)).astype(global_dtype, copy=False) + expected_labels = np.r_[[0] * 4, [1] * 4, [2] * 4] + X, expected_labels = shuffle(X, expected_labels, random_state=rng) + + clust = OPTICS( + min_samples=2, min_cluster_size=2, max_eps=np.inf, cluster_method="xi", xi=0.04 + ).fit(X) + assert_array_equal(clust.labels_, expected_labels) + + +def test_cluster_hierarchy_(global_dtype): + rng = np.random.RandomState(0) + n_points_per_cluster = 100 + C1 = [0, 0] + 2 * rng.randn(n_points_per_cluster, 2).astype( + global_dtype, copy=False + ) + C2 = [0, 0] + 50 * rng.randn(n_points_per_cluster, 2).astype( + global_dtype, copy=False + ) + X = np.vstack((C1, C2)) + X = shuffle(X, random_state=0) + + clusters = OPTICS(min_samples=20, xi=0.1).fit(X).cluster_hierarchy_ + assert clusters.shape == (2, 2) + diff = np.sum(clusters - np.array([[0, 99], [0, 199]])) + assert diff / len(X) < 0.05 + + +@pytest.mark.parametrize( + "csr_container, metric", + [(None, "minkowski")] + [(container, "euclidean") for container in CSR_CONTAINERS], +) +def test_correct_number_of_clusters(metric, csr_container): + # in 'auto' mode + + n_clusters = 3 + X = generate_clustered_data(n_clusters=n_clusters) + # Parameters chosen specifically for this task. 
+ # Compute OPTICS + clust = OPTICS(max_eps=5.0 * 6.0, min_samples=4, xi=0.1, metric=metric) + clust.fit(csr_container(X) if csr_container is not None else X) + # number of clusters, ignoring noise if present + n_clusters_1 = len(set(clust.labels_)) - int(-1 in clust.labels_) + assert n_clusters_1 == n_clusters + + # check attribute types and sizes + assert clust.labels_.shape == (len(X),) + assert clust.labels_.dtype.kind == "i" + + assert clust.reachability_.shape == (len(X),) + assert clust.reachability_.dtype.kind == "f" + + assert clust.core_distances_.shape == (len(X),) + assert clust.core_distances_.dtype.kind == "f" + + assert clust.ordering_.shape == (len(X),) + assert clust.ordering_.dtype.kind == "i" + assert set(clust.ordering_) == set(range(len(X))) + + +def test_minimum_number_of_sample_check(): + # test that we check a minimum number of samples + msg = "min_samples must be no greater than" + + # Compute OPTICS + X = [[1, 1]] + clust = OPTICS(max_eps=5.0 * 0.3, min_samples=10, min_cluster_size=1.0) + + # Run the fit + with pytest.raises(ValueError, match=msg): + clust.fit(X) + + +def test_bad_extract(): + # Test an extraction of eps too close to original eps + msg = "Specify an epsilon smaller than 0.15. Got 0.3." + centers = [[1, 1], [-1, -1], [1, -1]] + X, labels_true = make_blobs( + n_samples=750, centers=centers, cluster_std=0.4, random_state=0 + ) + + # Compute OPTICS + clust = OPTICS(max_eps=5.0 * 0.03, cluster_method="dbscan", eps=0.3, min_samples=10) + with pytest.raises(ValueError, match=msg): + clust.fit(X) + + +def test_bad_reachability(): + msg = "All reachability values are inf. Set a larger max_eps." + centers = [[1, 1], [-1, -1], [1, -1]] + X, labels_true = make_blobs( + n_samples=750, centers=centers, cluster_std=0.4, random_state=0 + ) + + with pytest.warns(UserWarning, match=msg): + clust = OPTICS(max_eps=5.0 * 0.003, min_samples=10, eps=0.015) + clust.fit(X) + + +def test_nowarn_if_metric_bool_data_bool(): + # make sure no warning is raised if metric and data are both boolean + # non-regression test for + # https://github.com/scikit-learn/scikit-learn/issues/18996 + + pairwise_metric = "rogerstanimoto" + X = np.random.randint(2, size=(5, 2), dtype=bool) + + with warnings.catch_warnings(): + warnings.simplefilter("error", DataConversionWarning) + + OPTICS(metric=pairwise_metric).fit(X) + + +def test_warn_if_metric_bool_data_no_bool(): + # make sure a *single* conversion warning is raised if metric is boolean + # but data isn't + # non-regression test for + # https://github.com/scikit-learn/scikit-learn/issues/18996 + + pairwise_metric = "rogerstanimoto" + X = np.random.randint(2, size=(5, 2), dtype=np.int32) + msg = f"Data will be converted to boolean for metric {pairwise_metric}" + + with pytest.warns(DataConversionWarning, match=msg) as warn_record: + OPTICS(metric=pairwise_metric).fit(X) + assert len(warn_record) == 1 + + +def test_nowarn_if_metric_no_bool(): + # make sure no conversion warning is raised if + # metric isn't boolean, no matter what the data type is + pairwise_metric = "minkowski" + X_bool = np.random.randint(2, size=(5, 2), dtype=bool) + X_num = np.random.randint(2, size=(5, 2), dtype=np.int32) + + with warnings.catch_warnings(): + warnings.simplefilter("error", DataConversionWarning) + + # fit boolean data + OPTICS(metric=pairwise_metric).fit(X_bool) + # fit numeric data + OPTICS(metric=pairwise_metric).fit(X_num) + + +def test_close_extract(): + # Test extract where extraction eps is close to scaled max_eps + + centers = [[1, 1], [-1, 
-1], [1, -1]] + X, labels_true = make_blobs( + n_samples=750, centers=centers, cluster_std=0.4, random_state=0 + ) + + # Compute OPTICS + clust = OPTICS(max_eps=1.0, cluster_method="dbscan", eps=0.3, min_samples=10).fit(X) + # Cluster ordering starts at 0; max cluster label = 2 is 3 clusters + assert max(clust.labels_) == 2 + + +@pytest.mark.parametrize("eps", [0.1, 0.3, 0.5]) +@pytest.mark.parametrize("min_samples", [3, 10, 20]) +@pytest.mark.parametrize( + "csr_container, metric", + [(None, "minkowski"), (None, "euclidean")] + + [(container, "euclidean") for container in CSR_CONTAINERS], +) +def test_dbscan_optics_parity(eps, min_samples, metric, global_dtype, csr_container): + # Test that OPTICS clustering labels are <= 5% difference of DBSCAN + + centers = [[1, 1], [-1, -1], [1, -1]] + X, labels_true = make_blobs( + n_samples=150, centers=centers, cluster_std=0.4, random_state=0 + ) + X = csr_container(X) if csr_container is not None else X + + X = X.astype(global_dtype, copy=False) + + # calculate optics with dbscan extract at 0.3 epsilon + op = OPTICS( + min_samples=min_samples, cluster_method="dbscan", eps=eps, metric=metric + ).fit(X) + + # calculate dbscan labels + db = DBSCAN(eps=eps, min_samples=min_samples).fit(X) + + contingency = contingency_matrix(db.labels_, op.labels_) + agree = min( + np.sum(np.max(contingency, axis=0)), np.sum(np.max(contingency, axis=1)) + ) + disagree = X.shape[0] - agree + + percent_mismatch = np.round((disagree - 1) / X.shape[0], 2) + + # verify label mismatch is <= 5% labels + assert percent_mismatch <= 0.05 + + +def test_min_samples_edge_case(global_dtype): + C1 = [[0, 0], [0, 0.1], [0, -0.1]] + C2 = [[10, 10], [10, 9], [10, 11]] + C3 = [[100, 100], [100, 96], [100, 106]] + X = np.vstack((C1, C2, C3)).astype(global_dtype, copy=False) + + expected_labels = np.r_[[0] * 3, [1] * 3, [2] * 3] + clust = OPTICS(min_samples=3, max_eps=7, cluster_method="xi", xi=0.04).fit(X) + assert_array_equal(clust.labels_, expected_labels) + + expected_labels = np.r_[[0] * 3, [1] * 3, [-1] * 3] + clust = OPTICS(min_samples=3, max_eps=3, cluster_method="xi", xi=0.04).fit(X) + assert_array_equal(clust.labels_, expected_labels) + + expected_labels = np.r_[[-1] * 9] + with pytest.warns(UserWarning, match="All reachability values"): + clust = OPTICS(min_samples=4, max_eps=3, cluster_method="xi", xi=0.04).fit(X) + assert_array_equal(clust.labels_, expected_labels) + + +# try arbitrary minimum sizes +@pytest.mark.parametrize("min_cluster_size", range(2, X.shape[0] // 10, 23)) +def test_min_cluster_size(min_cluster_size, global_dtype): + redX = X[::2].astype(global_dtype, copy=False) # reduce for speed + clust = OPTICS(min_samples=9, min_cluster_size=min_cluster_size).fit(redX) + cluster_sizes = np.bincount(clust.labels_[clust.labels_ != -1]) + if cluster_sizes.size: + assert min(cluster_sizes) >= min_cluster_size + # check behaviour is the same when min_cluster_size is a fraction + clust_frac = OPTICS( + min_samples=9, + min_cluster_size=min_cluster_size / redX.shape[0], + ) + clust_frac.fit(redX) + assert_array_equal(clust.labels_, clust_frac.labels_) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_min_cluster_size_invalid2(csr_container): + clust = OPTICS(min_cluster_size=len(X) + 1) + with pytest.raises(ValueError, match="must be no greater than the "): + clust.fit(X) + + clust = OPTICS(min_cluster_size=len(X) + 1, metric="euclidean") + with pytest.raises(ValueError, match="must be no greater than the "): + clust.fit(csr_container(X)) + + +def 
test_processing_order(): + # Ensure that we consider all unprocessed points, + # not only direct neighbors. when picking the next point. + Y = [[0], [10], [-10], [25]] + + clust = OPTICS(min_samples=3, max_eps=15).fit(Y) + assert_array_equal(clust.reachability_, [np.inf, 10, 10, 15]) + assert_array_equal(clust.core_distances_, [10, 15, np.inf, np.inf]) + assert_array_equal(clust.ordering_, [0, 1, 2, 3]) + + +def test_compare_to_ELKI(): + # Expected values, computed with (future) ELKI 0.7.5 using: + # java -jar elki.jar cli -dbc.in csv -dbc.filter FixedDBIDsFilter + # -algorithm clustering.optics.OPTICSHeap -optics.minpts 5 + # where the FixedDBIDsFilter gives 0-indexed ids. + r1 = [ + np.inf, + 1.0574896366427478, + 0.7587934993548423, + 0.7290174038973836, + 0.7290174038973836, + 0.7290174038973836, + 0.6861627576116127, + 0.7587934993548423, + 0.9280118450166668, + 1.1748022534146194, + 3.3355455741292257, + 0.49618389254482587, + 0.2552805046961355, + 0.2552805046961355, + 0.24944622248445714, + 0.24944622248445714, + 0.24944622248445714, + 0.2552805046961355, + 0.2552805046961355, + 0.3086779122185853, + 4.163024452756142, + 1.623152630340929, + 0.45315840475822655, + 0.25468325192031926, + 0.2254004358159971, + 0.18765711877083036, + 0.1821471333893275, + 0.1821471333893275, + 0.18765711877083036, + 0.18765711877083036, + 0.2240202988740153, + 1.154337614548715, + 1.342604473837069, + 1.323308536402633, + 0.8607514948648837, + 0.27219111215810565, + 0.13260875220533205, + 0.13260875220533205, + 0.09890587675958984, + 0.09890587675958984, + 0.13548790801634494, + 0.1575483940837384, + 0.17515137170530226, + 0.17575920159442388, + 0.27219111215810565, + 0.6101447895405373, + 1.3189208094864302, + 1.323308536402633, + 2.2509184159764577, + 2.4517810628594527, + 3.675977064404973, + 3.8264795626020365, + 2.9130735341510614, + 2.9130735341510614, + 2.9130735341510614, + 2.9130735341510614, + 2.8459300127258036, + 2.8459300127258036, + 2.8459300127258036, + 3.0321982337972537, + ] + o1 = [ + 0, + 3, + 6, + 4, + 7, + 8, + 2, + 9, + 5, + 1, + 31, + 30, + 32, + 34, + 33, + 38, + 39, + 35, + 37, + 36, + 44, + 21, + 23, + 24, + 22, + 25, + 27, + 29, + 26, + 28, + 20, + 40, + 45, + 46, + 10, + 15, + 11, + 13, + 17, + 19, + 18, + 12, + 16, + 14, + 47, + 49, + 43, + 48, + 42, + 41, + 53, + 57, + 51, + 52, + 56, + 59, + 54, + 55, + 58, + 50, + ] + p1 = [ + -1, + 0, + 3, + 6, + 6, + 6, + 8, + 3, + 7, + 5, + 1, + 31, + 30, + 30, + 34, + 34, + 34, + 32, + 32, + 37, + 36, + 44, + 21, + 23, + 24, + 22, + 25, + 25, + 22, + 22, + 22, + 21, + 40, + 45, + 46, + 10, + 15, + 15, + 13, + 13, + 15, + 11, + 19, + 15, + 10, + 47, + 12, + 45, + 14, + 43, + 42, + 53, + 57, + 57, + 57, + 57, + 59, + 59, + 59, + 58, + ] + + # Tests against known extraction array + # Does NOT work with metric='euclidean', because sklearn euclidean has + # worse numeric precision. 'minkowski' is slower but more accurate. 
+ clust1 = OPTICS(min_samples=5).fit(X) + + assert_array_equal(clust1.ordering_, np.array(o1)) + assert_array_equal(clust1.predecessor_[clust1.ordering_], np.array(p1)) + assert_allclose(clust1.reachability_[clust1.ordering_], np.array(r1)) + # ELKI currently does not print the core distances (which are not used much + # in literature, but we can at least ensure to have this consistency: + for i in clust1.ordering_[1:]: + assert clust1.reachability_[i] >= clust1.core_distances_[clust1.predecessor_[i]] + + # Expected values, computed with (future) ELKI 0.7.5 using + r2 = [ + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + 0.27219111215810565, + 0.13260875220533205, + 0.13260875220533205, + 0.09890587675958984, + 0.09890587675958984, + 0.13548790801634494, + 0.1575483940837384, + 0.17515137170530226, + 0.17575920159442388, + 0.27219111215810565, + 0.4928068613197889, + np.inf, + 0.2666183922512113, + 0.18765711877083036, + 0.1821471333893275, + 0.1821471333893275, + 0.1821471333893275, + 0.18715928772277457, + 0.18765711877083036, + 0.18765711877083036, + 0.25468325192031926, + np.inf, + 0.2552805046961355, + 0.2552805046961355, + 0.24944622248445714, + 0.24944622248445714, + 0.24944622248445714, + 0.2552805046961355, + 0.2552805046961355, + 0.3086779122185853, + 0.34466409325984865, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + ] + o2 = [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 15, + 11, + 13, + 17, + 19, + 18, + 12, + 16, + 14, + 47, + 46, + 20, + 22, + 25, + 23, + 27, + 29, + 24, + 26, + 28, + 21, + 30, + 32, + 34, + 33, + 38, + 39, + 35, + 37, + 36, + 31, + 40, + 41, + 42, + 43, + 44, + 45, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + ] + p2 = [ + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + 10, + 15, + 15, + 13, + 13, + 15, + 11, + 19, + 15, + 10, + 47, + -1, + 20, + 22, + 25, + 25, + 25, + 25, + 22, + 22, + 23, + -1, + 30, + 30, + 34, + 34, + 34, + 32, + 32, + 37, + 38, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + ] + clust2 = OPTICS(min_samples=5, max_eps=0.5).fit(X) + + assert_array_equal(clust2.ordering_, np.array(o2)) + assert_array_equal(clust2.predecessor_[clust2.ordering_], np.array(p2)) + assert_allclose(clust2.reachability_[clust2.ordering_], np.array(r2)) + + index = np.where(clust1.core_distances_ <= 0.5)[0] + assert_allclose(clust1.core_distances_[index], clust2.core_distances_[index]) + + +def test_extract_dbscan(global_dtype): + # testing an easy dbscan case. Not including clusters with different + # densities. 
+ rng = np.random.RandomState(0) + n_points_per_cluster = 20 + C1 = [-5, -2] + 0.2 * rng.randn(n_points_per_cluster, 2) + C2 = [4, -1] + 0.2 * rng.randn(n_points_per_cluster, 2) + C3 = [1, 2] + 0.2 * rng.randn(n_points_per_cluster, 2) + C4 = [-2, 3] + 0.2 * rng.randn(n_points_per_cluster, 2) + X = np.vstack((C1, C2, C3, C4)).astype(global_dtype, copy=False) + + clust = OPTICS(cluster_method="dbscan", eps=0.5).fit(X) + assert_array_equal(np.sort(np.unique(clust.labels_)), [0, 1, 2, 3]) + + +@pytest.mark.parametrize("csr_container", [None] + CSR_CONTAINERS) +def test_precomputed_dists(global_dtype, csr_container): + redX = X[::2].astype(global_dtype, copy=False) + dists = pairwise_distances(redX, metric="euclidean") + dists = csr_container(dists) if csr_container is not None else dists + with warnings.catch_warnings(): + warnings.simplefilter("ignore", EfficiencyWarning) + clust1 = OPTICS(min_samples=10, algorithm="brute", metric="precomputed").fit( + dists + ) + clust2 = OPTICS(min_samples=10, algorithm="brute", metric="euclidean").fit(redX) + + assert_allclose(clust1.reachability_, clust2.reachability_) + assert_array_equal(clust1.labels_, clust2.labels_) + + +def test_optics_predecessor_correction_ordering(): + """Check that cluster correction using predecessor is working as expected. + + In the following example, the predecessor correction was not working properly + since it was not using the right indices. + + This non-regression test check that reordering the data does not change the results. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/26324 + """ + X_1 = np.array([1, 2, 3, 1, 8, 8, 7, 100]).reshape(-1, 1) + reorder = [0, 1, 2, 4, 5, 6, 7, 3] + X_2 = X_1[reorder] + + optics_1 = OPTICS(min_samples=3, metric="euclidean").fit(X_1) + optics_2 = OPTICS(min_samples=3, metric="euclidean").fit(X_2) + + assert_array_equal(optics_1.labels_[reorder], optics_2.labels_) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_spectral.py b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..682df64044bf9d97cd4a16f8f69a32f41084d3f1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/cluster/tests/test_spectral.py @@ -0,0 +1,334 @@ +"""Testing for Spectral Clustering methods""" +import pickle +import re + +import numpy as np +import pytest +from scipy.linalg import LinAlgError + +from sklearn.cluster import SpectralClustering, spectral_clustering +from sklearn.cluster._spectral import cluster_qr, discretize +from sklearn.datasets import make_blobs +from sklearn.feature_extraction import img_to_graph +from sklearn.metrics import adjusted_rand_score +from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel +from sklearn.neighbors import NearestNeighbors +from sklearn.utils import check_random_state +from sklearn.utils._testing import assert_array_equal +from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS + +try: + from pyamg import smoothed_aggregation_solver # noqa + + amg_loaded = True +except ImportError: + amg_loaded = False + +centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10 +X, _ = make_blobs( + n_samples=60, + n_features=2, + centers=centers, + cluster_std=0.4, + shuffle=True, + random_state=0, +) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +@pytest.mark.parametrize("eigen_solver", ("arpack", "lobpcg")) +@pytest.mark.parametrize("assign_labels", ("kmeans", "discretize", 
"cluster_qr")) +def test_spectral_clustering(eigen_solver, assign_labels, csr_container): + S = np.array( + [ + [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0], + [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0], + [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0], + [0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0], + ] + ) + + for mat in (S, csr_container(S)): + model = SpectralClustering( + random_state=0, + n_clusters=2, + affinity="precomputed", + eigen_solver=eigen_solver, + assign_labels=assign_labels, + ).fit(mat) + labels = model.labels_ + if labels[0] == 0: + labels = 1 - labels + + assert adjusted_rand_score(labels, [1, 1, 1, 0, 0, 0, 0]) == 1 + + model_copy = pickle.loads(pickle.dumps(model)) + assert model_copy.n_clusters == model.n_clusters + assert model_copy.eigen_solver == model.eigen_solver + assert_array_equal(model_copy.labels_, model.labels_) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +@pytest.mark.parametrize("assign_labels", ("kmeans", "discretize", "cluster_qr")) +def test_spectral_clustering_sparse(assign_labels, coo_container): + X, y = make_blobs( + n_samples=20, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01 + ) + + S = rbf_kernel(X, gamma=1) + S = np.maximum(S - 1e-4, 0) + S = coo_container(S) + + labels = ( + SpectralClustering( + random_state=0, + n_clusters=2, + affinity="precomputed", + assign_labels=assign_labels, + ) + .fit(S) + .labels_ + ) + assert adjusted_rand_score(y, labels) == 1 + + +def test_precomputed_nearest_neighbors_filtering(): + # Test precomputed graph filtering when containing too many neighbors + X, y = make_blobs( + n_samples=200, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01 + ) + + n_neighbors = 2 + results = [] + for additional_neighbors in [0, 10]: + nn = NearestNeighbors(n_neighbors=n_neighbors + additional_neighbors).fit(X) + graph = nn.kneighbors_graph(X, mode="connectivity") + labels = ( + SpectralClustering( + random_state=0, + n_clusters=2, + affinity="precomputed_nearest_neighbors", + n_neighbors=n_neighbors, + ) + .fit(graph) + .labels_ + ) + results.append(labels) + + assert_array_equal(results[0], results[1]) + + +def test_affinities(): + # Note: in the following, random_state has been selected to have + # a dataset that yields a stable eigen decomposition both when built + # on OSX and Linux + X, y = make_blobs( + n_samples=20, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01 + ) + # nearest neighbors affinity + sp = SpectralClustering(n_clusters=2, affinity="nearest_neighbors", random_state=0) + with pytest.warns(UserWarning, match="not fully connected"): + sp.fit(X) + assert adjusted_rand_score(y, sp.labels_) == 1 + + sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0) + labels = sp.fit(X).labels_ + assert adjusted_rand_score(y, labels) == 1 + + X = check_random_state(10).rand(10, 5) * 10 + + kernels_available = kernel_metrics() + for kern in kernels_available: + # Additive chi^2 gives a negative similarity matrix which + # doesn't make sense for spectral clustering + if kern != "additive_chi2": + sp = SpectralClustering(n_clusters=2, affinity=kern, random_state=0) + labels = sp.fit(X).labels_ + assert (X.shape[0],) == labels.shape + + sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1, random_state=0) + labels = sp.fit(X).labels_ + assert (X.shape[0],) == labels.shape + + def histogram(x, y, **kwargs): + # Histogram kernel implemented as a callable. 
+ assert kwargs == {} # no kernel_params that we didn't ask for + return np.minimum(x, y).sum() + + sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0) + labels = sp.fit(X).labels_ + assert (X.shape[0],) == labels.shape + + +def test_cluster_qr(): + # cluster_qr by itself should not be used for clustering generic data + # other than the rows of the eigenvectors within spectral clustering, + # but cluster_qr must still preserve the labels for different dtypes + # of the generic fixed input even if the labels may be meaningless. + random_state = np.random.RandomState(seed=8) + n_samples, n_components = 10, 5 + data = random_state.randn(n_samples, n_components) + labels_float64 = cluster_qr(data.astype(np.float64)) + # Each sample is assigned a cluster identifier + assert labels_float64.shape == (n_samples,) + # All components should be covered by the assignment + assert np.array_equal(np.unique(labels_float64), np.arange(n_components)) + # Single precision data should yield the same cluster assignments + labels_float32 = cluster_qr(data.astype(np.float32)) + assert np.array_equal(labels_float64, labels_float32) + + +def test_cluster_qr_permutation_invariance(): + # cluster_qr must be invariant to sample permutation. + random_state = np.random.RandomState(seed=8) + n_samples, n_components = 100, 5 + data = random_state.randn(n_samples, n_components) + perm = random_state.permutation(n_samples) + assert np.array_equal( + cluster_qr(data)[perm], + cluster_qr(data[perm]), + ) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +@pytest.mark.parametrize("n_samples", [50, 100, 150, 500]) +def test_discretize(n_samples, coo_container): + # Test the discretize using a noise assignment matrix + random_state = np.random.RandomState(seed=8) + for n_class in range(2, 10): + # random class labels + y_true = random_state.randint(0, n_class + 1, n_samples) + y_true = np.array(y_true, float) + # noise class assignment matrix + y_indicator = coo_container( + (np.ones(n_samples), (np.arange(n_samples), y_true)), + shape=(n_samples, n_class + 1), + ) + y_true_noisy = y_indicator.toarray() + 0.1 * random_state.randn( + n_samples, n_class + 1 + ) + y_pred = discretize(y_true_noisy, random_state=random_state) + assert adjusted_rand_score(y_true, y_pred) > 0.8 + + +# TODO: Remove when pyamg does replaces sp.rand call with np.random.rand +# https://github.com/scikit-learn/scikit-learn/issues/15913 +@pytest.mark.filterwarnings( + "ignore:scipy.rand is deprecated:DeprecationWarning:pyamg.*" +) +# TODO: Remove when pyamg removes the use of np.float +@pytest.mark.filterwarnings( + "ignore:`np.float` is a deprecated alias:DeprecationWarning:pyamg.*" +) +# TODO: Remove when pyamg removes the use of pinv2 +@pytest.mark.filterwarnings( + "ignore:scipy.linalg.pinv2 is deprecated:DeprecationWarning:pyamg.*" +) +# TODO: Remove when pyamg removes the use of np.find_common_type +@pytest.mark.filterwarnings( + "ignore:np.find_common_type is deprecated:DeprecationWarning:pyamg.*" +) +def test_spectral_clustering_with_arpack_amg_solvers(): + # Test that spectral_clustering is the same for arpack and amg solver + # Based on toy example from plot_segmentation_toy.py + + # a small two coin image + x, y = np.indices((40, 40)) + + center1, center2 = (14, 12), (20, 25) + radius1, radius2 = 8, 7 + + circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1**2 + circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2**2 + + circles = circle1 | circle2 + mask = circles.copy() + img = 
circles.astype(float) + + graph = img_to_graph(img, mask=mask) + graph.data = np.exp(-graph.data / graph.data.std()) + + labels_arpack = spectral_clustering( + graph, n_clusters=2, eigen_solver="arpack", random_state=0 + ) + + assert len(np.unique(labels_arpack)) == 2 + + if amg_loaded: + labels_amg = spectral_clustering( + graph, n_clusters=2, eigen_solver="amg", random_state=0 + ) + assert adjusted_rand_score(labels_arpack, labels_amg) == 1 + else: + with pytest.raises(ValueError): + spectral_clustering(graph, n_clusters=2, eigen_solver="amg", random_state=0) + + +def test_n_components(): + # Test that after adding n_components, result is different and + # n_components = n_clusters by default + X, y = make_blobs( + n_samples=20, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01 + ) + sp = SpectralClustering(n_clusters=2, random_state=0) + labels = sp.fit(X).labels_ + # set n_components = n_cluster and test if result is the same + labels_same_ncomp = ( + SpectralClustering(n_clusters=2, n_components=2, random_state=0).fit(X).labels_ + ) + # test that n_components=n_clusters by default + assert_array_equal(labels, labels_same_ncomp) + + # test that n_components affect result + # n_clusters=8 by default, and set n_components=2 + labels_diff_ncomp = ( + SpectralClustering(n_components=2, random_state=0).fit(X).labels_ + ) + assert not np.array_equal(labels, labels_diff_ncomp) + + +@pytest.mark.parametrize("assign_labels", ("kmeans", "discretize", "cluster_qr")) +def test_verbose(assign_labels, capsys): + # Check verbose mode of KMeans for better coverage. + X, y = make_blobs( + n_samples=20, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01 + ) + + SpectralClustering(n_clusters=2, random_state=42, verbose=1).fit(X) + + captured = capsys.readouterr() + + assert re.search(r"Computing label assignment using", captured.out) + + if assign_labels == "kmeans": + assert re.search(r"Initialization complete", captured.out) + assert re.search(r"Iteration [0-9]+, inertia", captured.out) + + +def test_spectral_clustering_np_matrix_raises(): + """Check that spectral_clustering raises an informative error when passed + a np.matrix. See #10993""" + X = np.matrix([[0.0, 2.0], [2.0, 0.0]]) + + msg = r"np\.matrix is not supported. Please convert to a numpy array" + with pytest.raises(TypeError, match=msg): + spectral_clustering(X) + + +def test_spectral_clustering_not_infinite_loop(capsys, monkeypatch): + """Check that discretize raises LinAlgError when svd never converges. 
+ + Non-regression test for #21380 + """ + + def new_svd(*args, **kwargs): + raise LinAlgError() + + monkeypatch.setattr(np.linalg, "svd", new_svd) + vectors = np.ones((10, 4)) + + with pytest.raises(LinAlgError, match="SVD did not converge"): + discretize(vectors) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jd-1.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jd-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..951ceb7f7f17c2f89280aac5d5c2da81afd69d43 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jd-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:862e08520a2433a495a3bd3ae9fd9e6c7c540a9c632db29bb8252784cbdad779 +size 1786 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jdf-1.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jdf-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..2f757032db273b37ef22dc6d4468e675e7bd0915 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jdf-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a966dad58cf5fbc914a374ad5556c0414f5ed962237ed55a379fe96e308d00de +size 889 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jdq-1.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jdq-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..c9c6d8fb40f9db23fb31349fa8a087c288f5dae9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jdq-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84a8726d2c3f8bbca79d54d8b191158744b1993146f8f083b111a8ea78536057 +size 145 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/data-v1-dl-1.arff.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/data-v1-dl-1.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..ee6e378589d722771363d186944ed1f0f78c9836 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/data-v1-dl-1.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfe8945b949770b0da42daf58ce67d1c5fee25cf7b4fd145161837c2abc09429 +size 1841 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jd-1119.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jd-1119.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..8e23c4a4051b50c2a5dbe0b93f4619bbed92b9f3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jd-1119.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c41e5fbb3e59cd4de881ed7c8f88f9b03a750d537ba63581cafde6aafd77adc1 +size 711 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdl-dn-adult-census-l-2-dv-1.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdl-dn-adult-census-l-2-dv-1.json.gz new file mode 
100644 index 0000000000000000000000000000000000000000..c18f2eec9107a3e1455512f8d92e0289bb6d714d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdl-dn-adult-census-l-2-dv-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a5dc36ca9758313978b2a9d79cce763c6f84d5d95f15ac557b3d7482f22ee21 +size 364 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdl-dn-adult-census-l-2-s-act-.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdl-dn-adult-census-l-2-s-act-.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..7b7718d29ecb2075088f54c5f2c5fc0d01d9404b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdl-dn-adult-census-l-2-s-act-.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ec0955788914fa81f698e97a4d1aff773d7a125ed6e769c6271a0b48fc4011d +size 363 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdq-1119.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdq-1119.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..3265a7d933efe836193228b86e84c6c7a8b45afd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdq-1119.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef7cbcb58c2edcfea45c058b751faf7783e710462a924e9aacad8d47a7e9f94b +size 1549 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jd-2.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jd-2.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..24caf1bf71f829c85f13b7d2b8d0a94e4d27f1b3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jd-2.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a672d435b97a6033dfd1d2a5c823d237ad1865101bd5e403cd99b5be0ba4e03b +size 1363 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdf-2.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdf-2.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..be96cc72487b20a47142fb8c999ce032d73fba2e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdf-2.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1b8387a7d08014a1c09807ae458ca7666ab8a3c579cbfb189e09c6d7de892a6 +size 866 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdl-dn-anneal-l-2-dv-1.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdl-dn-anneal-l-2-dv-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..e1f109fd6086eb97a3be2e7533dc658dac0970d5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdl-dn-anneal-l-2-dv-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e900b190795224ff48e46a1c02b10020d4c986ba142880c02c86f0b472ded3c9 +size 309 diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdl-dn-anneal-l-2-s-act-.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdl-dn-anneal-l-2-s-act-.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..d5feb2e1a57bf4ba4d811dbff391977f38122fed --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdl-dn-anneal-l-2-s-act-.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff6225cb98260ca4ebec015a1a2754f2a7b0dbfb4d0f17dcf6727542154e2a10 +size 346 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdq-2.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdq-2.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..08e36a9fb7d7eb1d95b74eebf7c1b870d4a052c1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdq-2.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c46f6c5f221d877de604b906403b20cbdf674f1225bcdbb3e15bd1882a69a471 +size 1501 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/data-v1-dl-1666876.arff.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/data-v1-dl-1666876.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..ee6e378589d722771363d186944ed1f0f78c9836 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/data-v1-dl-1666876.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfe8945b949770b0da42daf58ce67d1c5fee25cf7b4fd145161837c2abc09429 +size 1841 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jd-40675.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jd-40675.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..b376ef7c9d32dd344e0fff0be5a30ae1e6dda779 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jd-40675.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a787772d60fbfcc21a0e96fd81906f03542e0b942d19dcc95dae47498953a4fd +size 323 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-dv-1-s-dact.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-dv-1-s-dact.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..336782317369c6fdf4d987c6fd3fdee3309a50e1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-dv-1-s-dact.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21ed1ecc5d874956e951a9361f251afb2165adda92798c89ca5e2f97ae80dd8f +size 317 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-dv-1.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-dv-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..577840cd46f47e22c75975d855fe21c9b997ee22 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-dv-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad0a4a5477605380f8819ce840dbb928a3d084267c512f6cb50d5be2f7c76bc2 +size 85 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-s-act-.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-s-act-.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..de6ccfccc5f28d446f34b7ffd7fcf83688cb00cf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-s-act-.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:141ba630e039ea44bbaef92a288e2d964fc3aa2ef805a9723b4aac738a26a627 +size 88 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/data-v1-dl-4965250.arff.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/data-v1-dl-4965250.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..d1d26798a46116abdc22f357615f381a19bccf99 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/data-v1-dl-4965250.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:543d0887312f43d9f65a7e1d08be78a2436369f632d7382b4134cebb525a48a3 +size 3000 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..126906cdde1d781b64a443df1e97787fc638a94d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/__init__.py @@ -0,0 +1,11 @@ +""" +The :mod:`sklearn.semi_supervised` module implements semi-supervised learning +algorithms. These algorithms utilize small amounts of labeled data and large +amounts of unlabeled data for classification tasks. This module includes Label +Propagation. +""" + +from ._label_propagation import LabelPropagation, LabelSpreading +from ._self_training import SelfTrainingClassifier + +__all__ = ["SelfTrainingClassifier", "LabelPropagation", "LabelSpreading"] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/_label_propagation.py b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/_label_propagation.py new file mode 100644 index 0000000000000000000000000000000000000000..1ae37d06a46f32a9ecf35b4aa5bfddd0cedf3563 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/_label_propagation.py @@ -0,0 +1,623 @@ +# coding=utf8 +""" +Label propagation in the context of this module refers to a set of +semi-supervised classification algorithms. At a high level, these algorithms +work by forming a fully-connected graph between all points given and solving +for the steady-state distribution of labels at each point. + +These algorithms perform very well in practice. The cost of running can be very +expensive, at approximately O(N^3) where N is the number of (labeled and +unlabeled) points. The theory (why they perform so well) is motivated by +intuitions from random walk algorithms and geometric relationships in the data. +For more information see the references below. 
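A minimal, editorial sketch of the propagation scheme the paragraph above describes (assuming an RBF affinity and hard clamping; none of the helper names below come from the file being added):

    import numpy as np
    from sklearn.metrics.pairwise import rbf_kernel

    def propagate_labels(X, y, gamma=20, max_iter=30, tol=1e-3):
        # y uses -1 for unlabeled samples, matching the convention in this module.
        classes = np.unique(y[y != -1])
        W = rbf_kernel(X, X, gamma=gamma)              # dense O(N^2) affinity matrix
        T = W / W.sum(axis=1, keepdims=True)           # row-normalise into a transition matrix
        F = np.zeros((len(y), len(classes)))
        for i, c in enumerate(classes):
            F[y == c, i] = 1.0                         # one-hot distributions for labeled points
        clamp, labeled = F.copy(), y != -1
        for _ in range(max_iter):
            F_next = T @ F                             # each point absorbs its neighbours' labels
            F_next[labeled] = clamp[labeled]           # hard clamping: labeled points never move
            if np.abs(F_next - F).sum() < tol:
                F = F_next
                break
            F = F_next
        return classes[F.argmax(axis=1)]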
+ +Model Features +-------------- +Label clamping: + The algorithm tries to learn distributions of labels over the dataset given + label assignments over an initial subset. In one variant, the algorithm does + not allow for any errors in the initial assignment (hard-clamping) while + in another variant, the algorithm allows for some wiggle room for the initial + assignments, allowing them to change by a fraction alpha in each iteration + (soft-clamping). + +Kernel: + A function which projects a vector into some higher dimensional space. This + implementation supports RBF and KNN kernels. Using the RBF kernel generates + a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of + size O(k*N) which will run much faster. See the documentation for SVMs for + more info on kernels. + +Examples +-------- +>>> import numpy as np +>>> from sklearn import datasets +>>> from sklearn.semi_supervised import LabelPropagation +>>> label_prop_model = LabelPropagation() +>>> iris = datasets.load_iris() +>>> rng = np.random.RandomState(42) +>>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3 +>>> labels = np.copy(iris.target) +>>> labels[random_unlabeled_points] = -1 +>>> label_prop_model.fit(iris.data, labels) +LabelPropagation(...) + +Notes +----- +References: +[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised +Learning (2006), pp. 193-216 + +[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient +Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005 +""" + +# Authors: Clay Woolam +# Utkarsh Upadhyay +# License: BSD +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real + +import numpy as np +from scipy import sparse + +from ..base import BaseEstimator, ClassifierMixin, _fit_context +from ..exceptions import ConvergenceWarning +from ..metrics.pairwise import rbf_kernel +from ..neighbors import NearestNeighbors +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import safe_sparse_dot +from ..utils.fixes import laplacian as csgraph_laplacian +from ..utils.multiclass import check_classification_targets +from ..utils.validation import check_is_fitted + + +class BaseLabelPropagation(ClassifierMixin, BaseEstimator, metaclass=ABCMeta): + """Base class for label propagation module. + + Parameters + ---------- + kernel : {'knn', 'rbf'} or callable, default='rbf' + String identifier for kernel function to use or the kernel function + itself. Only 'rbf' and 'knn' strings are valid inputs. The function + passed should take two inputs, each of shape (n_samples, n_features), + and return a (n_samples, n_samples) shaped weight matrix. + + gamma : float, default=20 + Parameter for rbf kernel. + + n_neighbors : int, default=7 + Parameter for knn kernel. Need to be strictly positive. + + alpha : float, default=1.0 + Clamping factor. + + max_iter : int, default=30 + Change maximum number of iterations allowed. + + tol : float, default=1e-3 + Convergence tolerance: threshold to consider the system at steady + state. + + n_jobs : int, default=None + The number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. 
+ """ + + _parameter_constraints: dict = { + "kernel": [StrOptions({"knn", "rbf"}), callable], + "gamma": [Interval(Real, 0, None, closed="left")], + "n_neighbors": [Interval(Integral, 0, None, closed="neither")], + "alpha": [None, Interval(Real, 0, 1, closed="neither")], + "max_iter": [Interval(Integral, 0, None, closed="neither")], + "tol": [Interval(Real, 0, None, closed="left")], + "n_jobs": [None, Integral], + } + + def __init__( + self, + kernel="rbf", + *, + gamma=20, + n_neighbors=7, + alpha=1, + max_iter=30, + tol=1e-3, + n_jobs=None, + ): + self.max_iter = max_iter + self.tol = tol + + # kernel parameters + self.kernel = kernel + self.gamma = gamma + self.n_neighbors = n_neighbors + + # clamping factor + self.alpha = alpha + + self.n_jobs = n_jobs + + def _get_kernel(self, X, y=None): + if self.kernel == "rbf": + if y is None: + return rbf_kernel(X, X, gamma=self.gamma) + else: + return rbf_kernel(X, y, gamma=self.gamma) + elif self.kernel == "knn": + if self.nn_fit is None: + self.nn_fit = NearestNeighbors( + n_neighbors=self.n_neighbors, n_jobs=self.n_jobs + ).fit(X) + if y is None: + return self.nn_fit.kneighbors_graph( + self.nn_fit._fit_X, self.n_neighbors, mode="connectivity" + ) + else: + return self.nn_fit.kneighbors(y, return_distance=False) + elif callable(self.kernel): + if y is None: + return self.kernel(X, X) + else: + return self.kernel(X, y) + + @abstractmethod + def _build_graph(self): + raise NotImplementedError( + "Graph construction must be implemented to fit a label propagation model." + ) + + def predict(self, X): + """Perform inductive inference across the model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + + Returns + ------- + y : ndarray of shape (n_samples,) + Predictions for input data. + """ + # Note: since `predict` does not accept semi-supervised labels as input, + # `fit(X, y).predict(X) != fit(X, y).transduction_`. + # Hence, `fit_predict` is not implemented. + # See https://github.com/scikit-learn/scikit-learn/pull/24898 + probas = self.predict_proba(X) + return self.classes_[np.argmax(probas, axis=1)].ravel() + + def predict_proba(self, X): + """Predict probability for each possible outcome. + + Compute the probability estimates for each single sample in X + and each possible outcome seen during training (categorical + distribution). + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + + Returns + ------- + probabilities : ndarray of shape (n_samples, n_classes) + Normalized probability distributions across + class labels. + """ + check_is_fitted(self) + + X_2d = self._validate_data( + X, + accept_sparse=["csc", "csr", "coo", "dok", "bsr", "lil", "dia"], + reset=False, + ) + weight_matrices = self._get_kernel(self.X_, X_2d) + if self.kernel == "knn": + probabilities = np.array( + [ + np.sum(self.label_distributions_[weight_matrix], axis=0) + for weight_matrix in weight_matrices + ] + ) + else: + weight_matrices = weight_matrices.T + probabilities = safe_sparse_dot(weight_matrices, self.label_distributions_) + normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T + probabilities /= normalizer + return probabilities + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit a semi-supervised label propagation model to X. + + The input samples (labeled and unlabeled) are provided by matrix X, + and target labels are provided by matrix y. 
We conventionally apply the + label -1 to unlabeled samples in matrix y in a semi-supervised + classification. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target class values with unlabeled points marked as -1. + All unlabeled samples will be transductively assigned labels + internally, which are stored in `transduction_`. + + Returns + ------- + self : object + Returns the instance itself. + """ + X, y = self._validate_data( + X, + y, + accept_sparse=["csr", "csc"], + reset=True, + ) + self.X_ = X + check_classification_targets(y) + + # actual graph construction (implementations should override this) + graph_matrix = self._build_graph() + + # label construction + # construct a categorical distribution for classification only + classes = np.unique(y) + classes = classes[classes != -1] + self.classes_ = classes + + n_samples, n_classes = len(y), len(classes) + + y = np.asarray(y) + unlabeled = y == -1 + + # initialize distributions + self.label_distributions_ = np.zeros((n_samples, n_classes)) + for label in classes: + self.label_distributions_[y == label, classes == label] = 1 + + y_static = np.copy(self.label_distributions_) + if self._variant == "propagation": + # LabelPropagation + y_static[unlabeled] = 0 + else: + # LabelSpreading + y_static *= 1 - self.alpha + + l_previous = np.zeros((self.X_.shape[0], n_classes)) + + unlabeled = unlabeled[:, np.newaxis] + if sparse.issparse(graph_matrix): + graph_matrix = graph_matrix.tocsr() + + for self.n_iter_ in range(self.max_iter): + if np.abs(self.label_distributions_ - l_previous).sum() < self.tol: + break + + l_previous = self.label_distributions_ + self.label_distributions_ = safe_sparse_dot( + graph_matrix, self.label_distributions_ + ) + + if self._variant == "propagation": + normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis] + normalizer[normalizer == 0] = 1 + self.label_distributions_ /= normalizer + self.label_distributions_ = np.where( + unlabeled, self.label_distributions_, y_static + ) + else: + # clamp + self.label_distributions_ = ( + np.multiply(self.alpha, self.label_distributions_) + y_static + ) + else: + warnings.warn( + "max_iter=%d was reached without convergence." % self.max_iter, + category=ConvergenceWarning, + ) + self.n_iter_ += 1 + + normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis] + normalizer[normalizer == 0] = 1 + self.label_distributions_ /= normalizer + + # set the transduction item + transduction = self.classes_[np.argmax(self.label_distributions_, axis=1)] + self.transduction_ = transduction.ravel() + return self + + +class LabelPropagation(BaseLabelPropagation): + """Label Propagation classifier. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + kernel : {'knn', 'rbf'} or callable, default='rbf' + String identifier for kernel function to use or the kernel function + itself. Only 'rbf' and 'knn' strings are valid inputs. The function + passed should take two inputs, each of shape (n_samples, n_features), + and return a (n_samples, n_samples) shaped weight matrix. + + gamma : float, default=20 + Parameter for rbf kernel. + + n_neighbors : int, default=7 + Parameter for knn kernel which need to be strictly positive. + + max_iter : int, default=1000 + Change maximum number of iterations allowed. 
+ + tol : float, 1e-3 + Convergence tolerance: threshold to consider the system at steady + state. + + n_jobs : int, default=None + The number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + X_ : {array-like, sparse matrix} of shape (n_samples, n_features) + Input array. + + classes_ : ndarray of shape (n_classes,) + The distinct labels used in classifying instances. + + label_distributions_ : ndarray of shape (n_samples, n_classes) + Categorical distribution for each item. + + transduction_ : ndarray of shape (n_samples) + Label assigned to each item during :term:`fit`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations run. + + See Also + -------- + LabelSpreading : Alternate label propagation strategy more robust to noise. + + References + ---------- + Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data + with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon + University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf + + Examples + -------- + >>> import numpy as np + >>> from sklearn import datasets + >>> from sklearn.semi_supervised import LabelPropagation + >>> label_prop_model = LabelPropagation() + >>> iris = datasets.load_iris() + >>> rng = np.random.RandomState(42) + >>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3 + >>> labels = np.copy(iris.target) + >>> labels[random_unlabeled_points] = -1 + >>> label_prop_model.fit(iris.data, labels) + LabelPropagation(...) + """ + + _variant = "propagation" + + _parameter_constraints: dict = {**BaseLabelPropagation._parameter_constraints} + _parameter_constraints.pop("alpha") + + def __init__( + self, + kernel="rbf", + *, + gamma=20, + n_neighbors=7, + max_iter=1000, + tol=1e-3, + n_jobs=None, + ): + super().__init__( + kernel=kernel, + gamma=gamma, + n_neighbors=n_neighbors, + max_iter=max_iter, + tol=tol, + n_jobs=n_jobs, + alpha=None, + ) + + def _build_graph(self): + """Matrix representing a fully connected graph between each sample + + This basic implementation creates a non-stochastic affinity matrix, so + class distributions will exceed 1 (normalization may be desired). + """ + if self.kernel == "knn": + self.nn_fit = None + affinity_matrix = self._get_kernel(self.X_) + normalizer = affinity_matrix.sum(axis=0) + if sparse.issparse(affinity_matrix): + affinity_matrix.data /= np.diag(np.array(normalizer)) + else: + affinity_matrix /= normalizer[:, np.newaxis] + return affinity_matrix + + def fit(self, X, y): + """Fit a semi-supervised label propagation model to X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target class values with unlabeled points marked as -1. + All unlabeled samples will be transductively assigned labels + internally, which are stored in `transduction_`. + + Returns + ------- + self : object + Returns the instance itself. 
+ """ + return super().fit(X, y) + + +class LabelSpreading(BaseLabelPropagation): + """LabelSpreading model for semi-supervised learning. + + This model is similar to the basic Label Propagation algorithm, + but uses affinity matrix based on the normalized graph Laplacian + and soft clamping across the labels. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + kernel : {'knn', 'rbf'} or callable, default='rbf' + String identifier for kernel function to use or the kernel function + itself. Only 'rbf' and 'knn' strings are valid inputs. The function + passed should take two inputs, each of shape (n_samples, n_features), + and return a (n_samples, n_samples) shaped weight matrix. + + gamma : float, default=20 + Parameter for rbf kernel. + + n_neighbors : int, default=7 + Parameter for knn kernel which is a strictly positive integer. + + alpha : float, default=0.2 + Clamping factor. A value in (0, 1) that specifies the relative amount + that an instance should adopt the information from its neighbors as + opposed to its initial label. + alpha=0 means keeping the initial label information; alpha=1 means + replacing all initial information. + + max_iter : int, default=30 + Maximum number of iterations allowed. + + tol : float, default=1e-3 + Convergence tolerance: threshold to consider the system at steady + state. + + n_jobs : int, default=None + The number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + X_ : ndarray of shape (n_samples, n_features) + Input array. + + classes_ : ndarray of shape (n_classes,) + The distinct labels used in classifying instances. + + label_distributions_ : ndarray of shape (n_samples, n_classes) + Categorical distribution for each item. + + transduction_ : ndarray of shape (n_samples,) + Label assigned to each item during :term:`fit`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations run. + + See Also + -------- + LabelPropagation : Unregularized graph based semi-supervised learning. + + References + ---------- + `Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston, + Bernhard Schoelkopf. Learning with local and global consistency (2004) + `_ + + Examples + -------- + >>> import numpy as np + >>> from sklearn import datasets + >>> from sklearn.semi_supervised import LabelSpreading + >>> label_prop_model = LabelSpreading() + >>> iris = datasets.load_iris() + >>> rng = np.random.RandomState(42) + >>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3 + >>> labels = np.copy(iris.target) + >>> labels[random_unlabeled_points] = -1 + >>> label_prop_model.fit(iris.data, labels) + LabelSpreading(...) 
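An editorial illustration, not the shipped code, of the soft clamping controlled by `alpha`: with `alpha=0.2`, each sweep blends 20% of the label mass received from graph neighbours with 80% of the original seed labels, so unlabeled points gradually inherit the labels of the cluster they sit in (a plain toy affinity stands in here for the normalised graph built by `_build_graph`):

    import numpy as np

    alpha = 0.2
    # toy symmetric affinity between three samples, diagonal zeroed as in _build_graph
    S = np.array([[0.0, 0.9, 0.1],
                  [0.9, 0.0, 0.2],
                  [0.1, 0.2, 0.0]])
    # one-hot seed labels; the third sample is unlabeled (all-zero row)
    Y0 = np.array([[1.0, 0.0],
                   [1.0, 0.0],
                   [0.0, 0.0]])
    F = Y0.copy()
    for _ in range(30):
        F = alpha * (S @ F) + (1 - alpha) * Y0   # soft clamping update
    print(F.argmax(axis=1))                      # the unlabeled sample follows its neighbours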
+ """ + + _variant = "spreading" + + _parameter_constraints: dict = {**BaseLabelPropagation._parameter_constraints} + _parameter_constraints["alpha"] = [Interval(Real, 0, 1, closed="neither")] + + def __init__( + self, + kernel="rbf", + *, + gamma=20, + n_neighbors=7, + alpha=0.2, + max_iter=30, + tol=1e-3, + n_jobs=None, + ): + # this one has different base parameters + super().__init__( + kernel=kernel, + gamma=gamma, + n_neighbors=n_neighbors, + alpha=alpha, + max_iter=max_iter, + tol=tol, + n_jobs=n_jobs, + ) + + def _build_graph(self): + """Graph matrix for Label Spreading computes the graph laplacian""" + # compute affinity matrix (or gram matrix) + if self.kernel == "knn": + self.nn_fit = None + n_samples = self.X_.shape[0] + affinity_matrix = self._get_kernel(self.X_) + laplacian = csgraph_laplacian(affinity_matrix, normed=True) + laplacian = -laplacian + if sparse.issparse(laplacian): + diag_mask = laplacian.row == laplacian.col + laplacian.data[diag_mask] = 0.0 + else: + laplacian.flat[:: n_samples + 1] = 0.0 # set diag to 0.0 + return laplacian diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/_self_training.py b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/_self_training.py new file mode 100644 index 0000000000000000000000000000000000000000..810447c1e6f460df424034c4a89054421f525295 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/semi_supervised/_self_training.py @@ -0,0 +1,417 @@ +import warnings +from numbers import Integral, Real + +import numpy as np + +from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone +from ..utils import safe_mask +from ..utils._param_validation import HasMethods, Interval, StrOptions +from ..utils.metadata_routing import _RoutingNotSupportedMixin +from ..utils.metaestimators import available_if +from ..utils.validation import check_is_fitted + +__all__ = ["SelfTrainingClassifier"] + +# Authors: Oliver Rausch +# Patrice Becker +# License: BSD 3 clause + + +def _estimator_has(attr): + """Check if we can delegate a method to the underlying estimator. + + First, we check the fitted `base_estimator_` if available, otherwise we check + the unfitted `base_estimator`. We raise the original `AttributeError` if + `attr` does not exist. This function is used together with `available_if`. + """ + + def check(self): + if hasattr(self, "base_estimator_"): + getattr(self.base_estimator_, attr) + else: + getattr(self.base_estimator, attr) + + return True + + return check + + +class SelfTrainingClassifier( + _RoutingNotSupportedMixin, MetaEstimatorMixin, BaseEstimator +): + """Self-training classifier. + + This :term:`metaestimator` allows a given supervised classifier to function as a + semi-supervised classifier, allowing it to learn from unlabeled data. It + does this by iteratively predicting pseudo-labels for the unlabeled data + and adding them to the training set. + + The classifier will continue iterating until either max_iter is reached, or + no pseudo-labels were added to the training set in the previous iteration. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + base_estimator : estimator object + An estimator object implementing `fit` and `predict_proba`. + Invoking the `fit` method will fit a clone of the passed estimator, + which will be stored in the `base_estimator_` attribute. + + threshold : float, default=0.75 + The decision threshold for use with `criterion='threshold'`. + Should be in [0, 1). 
When using the `'threshold'` criterion, a + :ref:`well calibrated classifier ` should be used. + + criterion : {'threshold', 'k_best'}, default='threshold' + The selection criterion used to select which labels to add to the + training set. If `'threshold'`, pseudo-labels with prediction + probabilities above `threshold` are added to the dataset. If `'k_best'`, + the `k_best` pseudo-labels with highest prediction probabilities are + added to the dataset. When using the 'threshold' criterion, a + :ref:`well calibrated classifier ` should be used. + + k_best : int, default=10 + The amount of samples to add in each iteration. Only used when + `criterion='k_best'`. + + max_iter : int or None, default=10 + Maximum number of iterations allowed. Should be greater than or equal + to 0. If it is `None`, the classifier will continue to predict labels + until no new pseudo-labels are added, or all unlabeled samples have + been labeled. + + verbose : bool, default=False + Enable verbose output. + + Attributes + ---------- + base_estimator_ : estimator object + The fitted estimator. + + classes_ : ndarray or list of ndarray of shape (n_classes,) + Class labels for each output. (Taken from the trained + `base_estimator_`). + + transduction_ : ndarray of shape (n_samples,) + The labels used for the final fit of the classifier, including + pseudo-labels added during fit. + + labeled_iter_ : ndarray of shape (n_samples,) + The iteration in which each sample was labeled. When a sample has + iteration 0, the sample was already labeled in the original dataset. + When a sample has iteration -1, the sample was not labeled in any + iteration. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + The number of rounds of self-training, that is the number of times the + base estimator is fitted on relabeled variants of the training set. + + termination_condition_ : {'max_iter', 'no_change', 'all_labeled'} + The reason that fitting was stopped. + + - `'max_iter'`: `n_iter_` reached `max_iter`. + - `'no_change'`: no new labels were predicted. + - `'all_labeled'`: all unlabeled samples were labeled before `max_iter` + was reached. + + See Also + -------- + LabelPropagation : Label propagation classifier. + LabelSpreading : Label spreading model for semi-supervised learning. + + References + ---------- + :doi:`David Yarowsky. 1995. Unsupervised word sense disambiguation rivaling + supervised methods. In Proceedings of the 33rd annual meeting on + Association for Computational Linguistics (ACL '95). Association for + Computational Linguistics, Stroudsburg, PA, USA, 189-196. + <10.3115/981658.981684>` + + Examples + -------- + >>> import numpy as np + >>> from sklearn import datasets + >>> from sklearn.semi_supervised import SelfTrainingClassifier + >>> from sklearn.svm import SVC + >>> rng = np.random.RandomState(42) + >>> iris = datasets.load_iris() + >>> random_unlabeled_points = rng.rand(iris.target.shape[0]) < 0.3 + >>> iris.target[random_unlabeled_points] = -1 + >>> svc = SVC(probability=True, gamma="auto") + >>> self_training_model = SelfTrainingClassifier(svc) + >>> self_training_model.fit(iris.data, iris.target) + SelfTrainingClassifier(...) 
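An editorial sketch of the single pseudo-labelling round that the docstring describes for the default `criterion='threshold'`; the estimator, dataset and cut-off below are arbitrary choices for illustration, and the authoritative logic is the `fit` method that follows:

    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression

    X, y = make_classification(n_samples=100, random_state=0)
    y_semi = y.copy()
    y_semi[30:] = -1                          # treat most samples as unlabeled

    clf = LogisticRegression(max_iter=1000)
    has_label = y_semi != -1
    clf.fit(X[has_label], y_semi[has_label])  # fit on the labeled subset only

    proba = clf.predict_proba(X[~has_label])
    confident = proba.max(axis=1) > 0.75      # default `threshold`
    pseudo = clf.classes_[proba.argmax(axis=1)]
    idx = np.flatnonzero(~has_label)[confident]
    y_semi[idx] = pseudo[confident]           # add confident predictions, then refit and repeat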
+ """ + + _estimator_type = "classifier" + + _parameter_constraints: dict = { + # We don't require `predic_proba` here to allow passing a meta-estimator + # that only exposes `predict_proba` after fitting. + "base_estimator": [HasMethods(["fit"])], + "threshold": [Interval(Real, 0.0, 1.0, closed="left")], + "criterion": [StrOptions({"threshold", "k_best"})], + "k_best": [Interval(Integral, 1, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left"), None], + "verbose": ["verbose"], + } + + def __init__( + self, + base_estimator, + threshold=0.75, + criterion="threshold", + k_best=10, + max_iter=10, + verbose=False, + ): + self.base_estimator = base_estimator + self.threshold = threshold + self.criterion = criterion + self.k_best = k_best + self.max_iter = max_iter + self.verbose = verbose + + @_fit_context( + # SelfTrainingClassifier.base_estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y): + """ + Fit self-training classifier using `X`, `y` as training data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Array representing the data. + + y : {array-like, sparse matrix} of shape (n_samples,) + Array representing the labels. Unlabeled samples should have the + label -1. + + Returns + ------- + self : object + Fitted estimator. + """ + # we need row slicing support for sparse matrices, but costly finiteness check + # can be delegated to the base estimator. + X, y = self._validate_data( + X, y, accept_sparse=["csr", "csc", "lil", "dok"], force_all_finite=False + ) + + self.base_estimator_ = clone(self.base_estimator) + + if y.dtype.kind in ["U", "S"]: + raise ValueError( + "y has dtype string. If you wish to predict on " + "string targets, use dtype object, and use -1" + " as the label for unlabeled samples." + ) + + has_label = y != -1 + + if np.all(has_label): + warnings.warn("y contains no unlabeled samples", UserWarning) + + if self.criterion == "k_best" and ( + self.k_best > X.shape[0] - np.sum(has_label) + ): + warnings.warn( + ( + "k_best is larger than the amount of unlabeled " + "samples. 
All unlabeled samples will be labeled in " + "the first iteration" + ), + UserWarning, + ) + + self.transduction_ = np.copy(y) + self.labeled_iter_ = np.full_like(y, -1) + self.labeled_iter_[has_label] = 0 + + self.n_iter_ = 0 + + while not np.all(has_label) and ( + self.max_iter is None or self.n_iter_ < self.max_iter + ): + self.n_iter_ += 1 + self.base_estimator_.fit( + X[safe_mask(X, has_label)], self.transduction_[has_label] + ) + + # Predict on the unlabeled samples + prob = self.base_estimator_.predict_proba(X[safe_mask(X, ~has_label)]) + pred = self.base_estimator_.classes_[np.argmax(prob, axis=1)] + max_proba = np.max(prob, axis=1) + + # Select new labeled samples + if self.criterion == "threshold": + selected = max_proba > self.threshold + else: + n_to_select = min(self.k_best, max_proba.shape[0]) + if n_to_select == max_proba.shape[0]: + selected = np.ones_like(max_proba, dtype=bool) + else: + # NB these are indices, not a mask + selected = np.argpartition(-max_proba, n_to_select)[:n_to_select] + + # Map selected indices into original array + selected_full = np.nonzero(~has_label)[0][selected] + + # Add newly labeled confident predictions to the dataset + self.transduction_[selected_full] = pred[selected] + has_label[selected_full] = True + self.labeled_iter_[selected_full] = self.n_iter_ + + if selected_full.shape[0] == 0: + # no changed labels + self.termination_condition_ = "no_change" + break + + if self.verbose: + print( + f"End of iteration {self.n_iter_}," + f" added {selected_full.shape[0]} new labels." + ) + + if self.n_iter_ == self.max_iter: + self.termination_condition_ = "max_iter" + if np.all(has_label): + self.termination_condition_ = "all_labeled" + + self.base_estimator_.fit( + X[safe_mask(X, has_label)], self.transduction_[has_label] + ) + self.classes_ = self.base_estimator_.classes_ + return self + + @available_if(_estimator_has("predict")) + def predict(self, X): + """Predict the classes of `X`. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Array representing the data. + + Returns + ------- + y : ndarray of shape (n_samples,) + Array with predicted labels. + """ + check_is_fitted(self) + X = self._validate_data( + X, + accept_sparse=True, + force_all_finite=False, + reset=False, + ) + return self.base_estimator_.predict(X) + + @available_if(_estimator_has("predict_proba")) + def predict_proba(self, X): + """Predict probability for each possible outcome. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Array representing the data. + + Returns + ------- + y : ndarray of shape (n_samples, n_features) + Array with prediction probabilities. + """ + check_is_fitted(self) + X = self._validate_data( + X, + accept_sparse=True, + force_all_finite=False, + reset=False, + ) + return self.base_estimator_.predict_proba(X) + + @available_if(_estimator_has("decision_function")) + def decision_function(self, X): + """Call decision function of the `base_estimator`. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Array representing the data. + + Returns + ------- + y : ndarray of shape (n_samples, n_features) + Result of the decision function of the `base_estimator`. 
+ """ + check_is_fitted(self) + X = self._validate_data( + X, + accept_sparse=True, + force_all_finite=False, + reset=False, + ) + return self.base_estimator_.decision_function(X) + + @available_if(_estimator_has("predict_log_proba")) + def predict_log_proba(self, X): + """Predict log probability for each possible outcome. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Array representing the data. + + Returns + ------- + y : ndarray of shape (n_samples, n_features) + Array with log prediction probabilities. + """ + check_is_fitted(self) + X = self._validate_data( + X, + accept_sparse=True, + force_all_finite=False, + reset=False, + ) + return self.base_estimator_.predict_log_proba(X) + + @available_if(_estimator_has("score")) + def score(self, X, y): + """Call score on the `base_estimator`. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Array representing the data. + + y : array-like of shape (n_samples,) + Array representing the labels. + + Returns + ------- + score : float + Result of calling score on the `base_estimator`. + """ + check_is_fitted(self) + X = self._validate_data( + X, + accept_sparse=True, + force_all_finite=False, + reset=False, + ) + return self.base_estimator_.score(X, y)