diff --git a/ckpts/universal/global_step40/zero/14.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step40/zero/14.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..3e474720950ab432827bece298f627a4c780b5cb --- /dev/null +++ b/ckpts/universal/global_step40/zero/14.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3dcc4f15d99faf64fa1a42ca9ef283e9de214645ac2b982fb8f8ab9ebe175680 +size 33555533 diff --git a/ckpts/universal/global_step40/zero/22.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/22.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..49d5a6d310b0b296f0001d5ea1a59b5d66f955f4 --- /dev/null +++ b/ckpts/universal/global_step40/zero/22.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b9ac618dac9dcb48bdd7092028de9b66854c94ea938ec8568a6b7eb4676a1f5 +size 50332828 diff --git a/ckpts/universal/global_step40/zero/22.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/22.attention.query_key_value.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..19c35e069625183eb72f70790b75e90b88359514 --- /dev/null +++ b/ckpts/universal/global_step40/zero/22.attention.query_key_value.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da06feecd6d1e718d9b9167e25651b0e8a80cc781019405e3f4e2f7e4f3caaaf +size 50332843 diff --git a/ckpts/universal/global_step40/zero/22.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step40/zero/22.attention.query_key_value.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..6a3435e76f015af3a7923e370c33502480cbce39 --- /dev/null +++ b/ckpts/universal/global_step40/zero/22.attention.query_key_value.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:884dd2ccf06672fbfd2a1a6967c1c6009be0cf2f6fccffc267fe1c7e107020a8 +size 50332749 diff --git a/ckpts/universal/global_step40/zero/4.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/4.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..fc8e5bcddda9ac1bc407eea7cb043fdf39fcced4 --- /dev/null +++ b/ckpts/universal/global_step40/zero/4.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32d106debe5c31a8b7a7adc8a86f1fbf36057d61a321b48f758e2d527ad96239 +size 33555612 diff --git a/ckpts/universal/global_step40/zero/4.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/4.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..b9cd14471be36f6b04999a17f056b616c93606e5 --- /dev/null +++ b/ckpts/universal/global_step40/zero/4.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d33edf71fffdb23d152d4eb93bbd429470641ea212b0732c5a41fd6039f6417 +size 33555627 diff --git a/ckpts/universal/global_step40/zero/4.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step40/zero/4.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..eb04615671c83a53c5d5cd0886b1109f15816631 --- /dev/null +++ b/ckpts/universal/global_step40/zero/4.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:7513cb9647fb584b4b7f27664d47405ce13bcb1ceaeadd82cb27e2483fc7a6e7 +size 33555533 diff --git a/ckpts/universal/global_step40/zero/5.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/5.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..b5c2d05bc01ddab204807ced8aa64d858ea93094 --- /dev/null +++ b/ckpts/universal/global_step40/zero/5.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2070dc0cf506fcb7ab6cd339af1893f171326c5036101c10165116f6bb89c9c +size 16778396 diff --git a/ckpts/universal/global_step40/zero/5.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/5.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..2c58d16b195c9608b5f0b6d9693efb81300d90cf --- /dev/null +++ b/ckpts/universal/global_step40/zero/5.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:565b00d218ce0f593c4250f8b2b43c531b090f8676cd6f398a86ccfb9ed5100a +size 16778411 diff --git a/ckpts/universal/global_step40/zero/5.attention.dense.weight/fp32.pt b/ckpts/universal/global_step40/zero/5.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..70700bf2ed5fb478d9db063a8061e35636dcc195 --- /dev/null +++ b/ckpts/universal/global_step40/zero/5.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acaf21d177b9f556a4e87b2a4af8ff7756dd368faa9fe6c54f4b7f1bab938219 +size 16778317 diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/__init__.py b/venv/lib/python3.10/site-packages/sklearn/cluster/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d3104d816bfb63e2038adb9897b2d15cd0d9c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/__init__.py @@ -0,0 +1,56 @@ +""" +The :mod:`sklearn.cluster` module gathers popular unsupervised clustering +algorithms. 
+""" + +from ._affinity_propagation import AffinityPropagation, affinity_propagation +from ._agglomerative import ( + AgglomerativeClustering, + FeatureAgglomeration, + linkage_tree, + ward_tree, +) +from ._bicluster import SpectralBiclustering, SpectralCoclustering +from ._birch import Birch +from ._bisect_k_means import BisectingKMeans +from ._dbscan import DBSCAN, dbscan +from ._hdbscan.hdbscan import HDBSCAN +from ._kmeans import KMeans, MiniBatchKMeans, k_means, kmeans_plusplus +from ._mean_shift import MeanShift, estimate_bandwidth, get_bin_seeds, mean_shift +from ._optics import ( + OPTICS, + cluster_optics_dbscan, + cluster_optics_xi, + compute_optics_graph, +) +from ._spectral import SpectralClustering, spectral_clustering + +__all__ = [ + "AffinityPropagation", + "AgglomerativeClustering", + "Birch", + "DBSCAN", + "OPTICS", + "cluster_optics_dbscan", + "cluster_optics_xi", + "compute_optics_graph", + "KMeans", + "BisectingKMeans", + "FeatureAgglomeration", + "MeanShift", + "MiniBatchKMeans", + "SpectralClustering", + "affinity_propagation", + "dbscan", + "estimate_bandwidth", + "get_bin_seeds", + "k_means", + "kmeans_plusplus", + "linkage_tree", + "mean_shift", + "spectral_clustering", + "ward_tree", + "SpectralBiclustering", + "SpectralCoclustering", + "HDBSCAN", +] diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b99f22ea55972a61e3a22216ec510c2530a96d39 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_affinity_propagation.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_affinity_propagation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7880e8d3f3334c979b8d2c9a45da44b504b2313 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_affinity_propagation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_agglomerative.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_agglomerative.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01091932a6d0ae5fb1bf3b64d5481413f8cbdab3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_agglomerative.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bicluster.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bicluster.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8596f05541353e6456d1196f393d74982cac196f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bicluster.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_birch.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_birch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43601c9152eacae43722f77d938a628af686f839 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_birch.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bisect_k_means.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bisect_k_means.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63c9a7ad2ddcdf8c81ed4c73d76594155148459e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_bisect_k_means.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_dbscan.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_dbscan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2262bac59dca0909191ef65f25a09fe7e5bef59 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_dbscan.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_feature_agglomeration.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_feature_agglomeration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6f8e6f4d382ffa59301ff521c1c0e4310139b71 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_feature_agglomeration.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_kmeans.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_kmeans.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f134598f426c62e0b760067c0fad990935f293f6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_kmeans.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_mean_shift.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_mean_shift.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c65acc814cf21ed8d39c1774deaa3c15dc35ee5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_mean_shift.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_optics.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_optics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d931781225cde8c924221558a64aa50aebfb617b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_optics.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_spectral.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_spectral.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d227770a55f4d3fe016b4976ad1fe8f751dccbb Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/__pycache__/_spectral.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_affinity_propagation.py b/venv/lib/python3.10/site-packages/sklearn/cluster/_affinity_propagation.py new file mode 100644 index 0000000000000000000000000000000000000000..735e30d3ea4b29f65a29a297ad525fd2780001b4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/_affinity_propagation.py @@ -0,0 +1,604 @@ +"""Affinity Propagation clustering algorithm.""" + +# Author: Alexandre Gramfort alexandre.gramfort@inria.fr +# Gael Varoquaux gael.varoquaux@normalesup.org 
+ +# License: BSD 3 clause + +import warnings +from numbers import Integral, Real + +import numpy as np + +from .._config import config_context +from ..base import BaseEstimator, ClusterMixin, _fit_context +from ..exceptions import ConvergenceWarning +from ..metrics import euclidean_distances, pairwise_distances_argmin +from ..utils import check_random_state +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.validation import check_is_fitted + + +def _equal_similarities_and_preferences(S, preference): + def all_equal_preferences(): + return np.all(preference == preference.flat[0]) + + def all_equal_similarities(): + # Create mask to ignore diagonal of S + mask = np.ones(S.shape, dtype=bool) + np.fill_diagonal(mask, 0) + + return np.all(S[mask].flat == S[mask].flat[0]) + + return all_equal_preferences() and all_equal_similarities() + + +def _affinity_propagation( + S, + *, + preference, + convergence_iter, + max_iter, + damping, + verbose, + return_n_iter, + random_state, +): + """Main affinity propagation algorithm.""" + n_samples = S.shape[0] + if n_samples == 1 or _equal_similarities_and_preferences(S, preference): + # It makes no sense to run the algorithm in this case, so return 1 or + # n_samples clusters, depending on preferences + warnings.warn( + "All samples have mutually equal similarities. " + "Returning arbitrary cluster center(s)." + ) + if preference.flat[0] > S.flat[n_samples - 1]: + return ( + (np.arange(n_samples), np.arange(n_samples), 0) + if return_n_iter + else (np.arange(n_samples), np.arange(n_samples)) + ) + else: + return ( + (np.array([0]), np.array([0] * n_samples), 0) + if return_n_iter + else (np.array([0]), np.array([0] * n_samples)) + ) + + # Place preference on the diagonal of S + S.flat[:: (n_samples + 1)] = preference + + A = np.zeros((n_samples, n_samples)) + R = np.zeros((n_samples, n_samples)) # Initialize messages + # Intermediate results + tmp = np.zeros((n_samples, n_samples)) + + # Remove degeneracies + S += ( + np.finfo(S.dtype).eps * S + np.finfo(S.dtype).tiny * 100 + ) * random_state.standard_normal(size=(n_samples, n_samples)) + + # Execute parallel affinity propagation updates + e = np.zeros((n_samples, convergence_iter)) + + ind = np.arange(n_samples) + + for it in range(max_iter): + # tmp = A + S; compute responsibilities + np.add(A, S, tmp) + I = np.argmax(tmp, axis=1) + Y = tmp[ind, I] # np.max(A + S, axis=1) + tmp[ind, I] = -np.inf + Y2 = np.max(tmp, axis=1) + + # tmp = Rnew + np.subtract(S, Y[:, None], tmp) + tmp[ind, I] = S[ind, I] - Y2 + + # Damping + tmp *= 1 - damping + R *= damping + R += tmp + + # tmp = Rp; compute availabilities + np.maximum(R, 0, tmp) + tmp.flat[:: n_samples + 1] = R.flat[:: n_samples + 1] + + # tmp = -Anew + tmp -= np.sum(tmp, axis=0) + dA = np.diag(tmp).copy() + tmp.clip(0, np.inf, tmp) + tmp.flat[:: n_samples + 1] = dA + + # Damping + tmp *= 1 - damping + A *= damping + A -= tmp + + # Check for convergence + E = (np.diag(A) + np.diag(R)) > 0 + e[:, it % convergence_iter] = E + K = np.sum(E, axis=0) + + if it >= convergence_iter: + se = np.sum(e, axis=1) + unconverged = np.sum((se == convergence_iter) + (se == 0)) != n_samples + if (not unconverged and (K > 0)) or (it == max_iter): + never_converged = False + if verbose: + print("Converged after %d iterations." 
% it) + break + else: + never_converged = True + if verbose: + print("Did not converge") + + I = np.flatnonzero(E) + K = I.size # Identify exemplars + + if K > 0: + if never_converged: + warnings.warn( + ( + "Affinity propagation did not converge, this model " + "may return degenerate cluster centers and labels." + ), + ConvergenceWarning, + ) + c = np.argmax(S[:, I], axis=1) + c[I] = np.arange(K) # Identify clusters + # Refine the final set of exemplars and clusters and return results + for k in range(K): + ii = np.where(c == k)[0] + j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0)) + I[k] = ii[j] + + c = np.argmax(S[:, I], axis=1) + c[I] = np.arange(K) + labels = I[c] + # Reduce labels to a sorted, gapless, list + cluster_centers_indices = np.unique(labels) + labels = np.searchsorted(cluster_centers_indices, labels) + else: + warnings.warn( + ( + "Affinity propagation did not converge and this model " + "will not have any cluster centers." + ), + ConvergenceWarning, + ) + labels = np.array([-1] * n_samples) + cluster_centers_indices = [] + + if return_n_iter: + return cluster_centers_indices, labels, it + 1 + else: + return cluster_centers_indices, labels + + +############################################################################### +# Public API + + +@validate_params( + { + "S": ["array-like"], + "return_n_iter": ["boolean"], + }, + prefer_skip_nested_validation=False, +) +def affinity_propagation( + S, + *, + preference=None, + convergence_iter=15, + max_iter=200, + damping=0.5, + copy=True, + verbose=False, + return_n_iter=False, + random_state=None, +): + """Perform Affinity Propagation Clustering of data. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + S : array-like of shape (n_samples, n_samples) + Matrix of similarities between points. + + preference : array-like of shape (n_samples,) or float, default=None + Preferences for each point - points with larger values of + preferences are more likely to be chosen as exemplars. The number of + exemplars, i.e. of clusters, is influenced by the input preferences + value. If the preferences are not passed as arguments, they will be + set to the median of the input similarities (resulting in a moderate + number of clusters). For a smaller amount of clusters, this can be set + to the minimum value of the similarities. + + convergence_iter : int, default=15 + Number of iterations with no change in the number + of estimated clusters that stops the convergence. + + max_iter : int, default=200 + Maximum number of iterations. + + damping : float, default=0.5 + Damping factor between 0.5 and 1. + + copy : bool, default=True + If copy is False, the affinity matrix is modified inplace by the + algorithm, for memory efficiency. + + verbose : bool, default=False + The verbosity level. + + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + random_state : int, RandomState instance or None, default=None + Pseudo-random number generator to control the starting state. + Use an int for reproducible results across function calls. + See the :term:`Glossary `. + + .. versionadded:: 0.23 + this parameter was previously hardcoded as 0. + + Returns + ------- + cluster_centers_indices : ndarray of shape (n_clusters,) + Index of clusters centers. + + labels : ndarray of shape (n_samples,) + Cluster labels for each point. + + n_iter : int + Number of iterations run. Returned only if `return_n_iter` is + set to True. 
+ + Notes + ----- + For an example, see :ref:`examples/cluster/plot_affinity_propagation.py + `. + + When the algorithm does not converge, it will still return arrays of + ``cluster_centers_indices`` and labels if there are any exemplars/clusters; + however, they may be degenerate and should be used with caution. + + When all training samples have equal similarities and equal preferences, + the assignment of cluster centers and labels depends on the preference. + If the preference is smaller than the similarities, a single cluster center + and label ``0`` for every sample will be returned. Otherwise, every + training sample becomes its own cluster center and is assigned a unique + label. + + References + ---------- + Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages + Between Data Points", Science Feb. 2007 + + Examples + -------- + >>> import numpy as np + >>> from sklearn.cluster import affinity_propagation + >>> from sklearn.metrics.pairwise import euclidean_distances + >>> X = np.array([[1, 2], [1, 4], [1, 0], + ... [4, 2], [4, 4], [4, 0]]) + >>> S = -euclidean_distances(X, squared=True) + >>> cluster_centers_indices, labels = affinity_propagation(S, random_state=0) + >>> cluster_centers_indices + array([0, 3]) + >>> labels + array([0, 0, 0, 1, 1, 1]) + """ + estimator = AffinityPropagation( + damping=damping, + max_iter=max_iter, + convergence_iter=convergence_iter, + copy=copy, + preference=preference, + affinity="precomputed", + verbose=verbose, + random_state=random_state, + ).fit(S) + + if return_n_iter: + return estimator.cluster_centers_indices_, estimator.labels_, estimator.n_iter_ + return estimator.cluster_centers_indices_, estimator.labels_ + + +class AffinityPropagation(ClusterMixin, BaseEstimator): + """Perform Affinity Propagation Clustering of data. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + damping : float, default=0.5 + Damping factor in the range `[0.5, 1.0)` is the extent to + which the current value is maintained relative to + incoming values (weighted 1 - damping). This is in order + to avoid numerical oscillations when updating these + values (messages). + + max_iter : int, default=200 + Maximum number of iterations. + + convergence_iter : int, default=15 + Number of iterations with no change in the number + of estimated clusters that stops the convergence. + + copy : bool, default=True + Make a copy of input data. + + preference : array-like of shape (n_samples,) or float, default=None + Preferences for each point - points with larger values of + preferences are more likely to be chosen as exemplars. The number + of exemplars, i.e. of clusters, is influenced by the input + preferences value. If the preferences are not passed as arguments, + they will be set to the median of the input similarities. + + affinity : {'euclidean', 'precomputed'}, default='euclidean' + Which affinity to use. At the moment 'precomputed' and + ``euclidean`` are supported. 'euclidean' uses the + negative squared euclidean distance between points. + + verbose : bool, default=False + Whether to be verbose. + + random_state : int, RandomState instance or None, default=None + Pseudo-random number generator to control the starting state. + Use an int for reproducible results across function calls. + See the :term:`Glossary `. + + .. versionadded:: 0.23 + this parameter was previously hardcoded as 0. + + Attributes + ---------- + cluster_centers_indices_ : ndarray of shape (n_clusters,) + Indices of cluster centers.
+ + cluster_centers_ : ndarray of shape (n_clusters, n_features) + Cluster centers (if affinity != ``precomputed``). + + labels_ : ndarray of shape (n_samples,) + Labels of each point. + + affinity_matrix_ : ndarray of shape (n_samples, n_samples) + Stores the affinity matrix used in ``fit``. + + n_iter_ : int + Number of iterations taken to converge. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + AgglomerativeClustering : Recursively merges the pair of + clusters that minimally increases a given linkage distance. + FeatureAgglomeration : Similar to AgglomerativeClustering, + but recursively merges features instead of samples. + KMeans : K-Means clustering. + MiniBatchKMeans : Mini-Batch K-Means clustering. + MeanShift : Mean shift clustering using a flat kernel. + SpectralClustering : Apply clustering to a projection + of the normalized Laplacian. + + Notes + ----- + For an example, see :ref:`examples/cluster/plot_affinity_propagation.py + `. + + The algorithmic complexity of affinity propagation is quadratic + in the number of points. + + When the algorithm does not converge, it will still return arrays of + ``cluster_centers_indices`` and labels if there are any exemplars/clusters; + however, they may be degenerate and should be used with caution. + + When ``fit`` does not converge, ``cluster_centers_`` is still populated; + however, it may be degenerate. In such a case, proceed with caution. + If ``fit`` does not converge and fails to produce any ``cluster_centers_`` + then ``predict`` will label every sample as ``-1``. + + When all training samples have equal similarities and equal preferences, + the assignment of cluster centers and labels depends on the preference. + If the preference is smaller than the similarities, ``fit`` will result in + a single cluster center and label ``0`` for every sample. Otherwise, every + training sample becomes its own cluster center and is assigned a unique + label. + + References + ---------- + + Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages + Between Data Points", Science Feb. 2007 + + Examples + -------- + >>> from sklearn.cluster import AffinityPropagation + >>> import numpy as np + >>> X = np.array([[1, 2], [1, 4], [1, 0], + ... 
[4, 2], [4, 4], [4, 0]]) + >>> clustering = AffinityPropagation(random_state=5).fit(X) + >>> clustering + AffinityPropagation(random_state=5) + >>> clustering.labels_ + array([0, 0, 0, 1, 1, 1]) + >>> clustering.predict([[0, 0], [4, 4]]) + array([0, 1]) + >>> clustering.cluster_centers_ + array([[1, 2], + [4, 2]]) + """ + + _parameter_constraints: dict = { + "damping": [Interval(Real, 0.5, 1.0, closed="left")], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "convergence_iter": [Interval(Integral, 1, None, closed="left")], + "copy": ["boolean"], + "preference": [ + "array-like", + Interval(Real, None, None, closed="neither"), + None, + ], + "affinity": [StrOptions({"euclidean", "precomputed"})], + "verbose": ["verbose"], + "random_state": ["random_state"], + } + + def __init__( + self, + *, + damping=0.5, + max_iter=200, + convergence_iter=15, + copy=True, + preference=None, + affinity="euclidean", + verbose=False, + random_state=None, + ): + self.damping = damping + self.max_iter = max_iter + self.convergence_iter = convergence_iter + self.copy = copy + self.verbose = verbose + self.preference = preference + self.affinity = affinity + self.random_state = random_state + + def _more_tags(self): + return {"pairwise": self.affinity == "precomputed"} + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the clustering from features, or affinity matrix. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), or \ + array-like of shape (n_samples, n_samples) + Training instances to cluster, or similarities / affinities between + instances if ``affinity='precomputed'``. If a sparse feature matrix + is provided, it will be converted into a sparse ``csr_matrix``. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self + Returns the instance itself. + """ + if self.affinity == "precomputed": + accept_sparse = False + else: + accept_sparse = "csr" + X = self._validate_data(X, accept_sparse=accept_sparse) + if self.affinity == "precomputed": + self.affinity_matrix_ = X.copy() if self.copy else X + else: # self.affinity == "euclidean" + self.affinity_matrix_ = -euclidean_distances(X, squared=True) + + if self.affinity_matrix_.shape[0] != self.affinity_matrix_.shape[1]: + raise ValueError( + "The matrix of similarities must be a square array. " + f"Got {self.affinity_matrix_.shape} instead." + ) + + if self.preference is None: + preference = np.median(self.affinity_matrix_) + else: + preference = self.preference + preference = np.asarray(preference) + + random_state = check_random_state(self.random_state) + + ( + self.cluster_centers_indices_, + self.labels_, + self.n_iter_, + ) = _affinity_propagation( + self.affinity_matrix_, + max_iter=self.max_iter, + convergence_iter=self.convergence_iter, + preference=preference, + damping=self.damping, + verbose=self.verbose, + return_n_iter=True, + random_state=random_state, + ) + + if self.affinity != "precomputed": + self.cluster_centers_ = X[self.cluster_centers_indices_].copy() + + return self + + def predict(self, X): + """Predict the closest cluster each sample in X belongs to. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data to predict. If a sparse matrix is provided, it will be + converted into a sparse ``csr_matrix``. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Cluster labels. 
+ """ + check_is_fitted(self) + X = self._validate_data(X, reset=False, accept_sparse="csr") + if not hasattr(self, "cluster_centers_"): + raise ValueError( + "Predict method is not supported when affinity='precomputed'." + ) + + if self.cluster_centers_.shape[0] > 0: + with config_context(assume_finite=True): + return pairwise_distances_argmin(X, self.cluster_centers_) + else: + warnings.warn( + ( + "This model does not have any cluster centers " + "because affinity propagation did not converge. " + "Labeling every sample as '-1'." + ), + ConvergenceWarning, + ) + return np.array([-1] * X.shape[0]) + + def fit_predict(self, X, y=None): + """Fit clustering from features/affinity matrix; return cluster labels. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), or \ + array-like of shape (n_samples, n_samples) + Training instances to cluster, or similarities / affinities between + instances if ``affinity='precomputed'``. If a sparse feature matrix + is provided, it will be converted into a sparse ``csr_matrix``. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Cluster labels. + """ + return super().fit_predict(X, y) diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_agglomerative.py b/venv/lib/python3.10/site-packages/sklearn/cluster/_agglomerative.py new file mode 100644 index 0000000000000000000000000000000000000000..884d1605e70c3b3b3936fce956d3fb7f55ff449a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/_agglomerative.py @@ -0,0 +1,1336 @@ +"""Hierarchical Agglomerative Clustering + +These routines perform some hierarchical agglomerative clustering of some +input data. + +Authors : Vincent Michel, Bertrand Thirion, Alexandre Gramfort, + Gael Varoquaux +License: BSD 3 clause +""" +import warnings +from heapq import heapify, heappop, heappush, heappushpop +from numbers import Integral, Real + +import numpy as np +from scipy import sparse +from scipy.sparse.csgraph import connected_components + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + ClusterMixin, + _fit_context, +) +from ..metrics import DistanceMetric +from ..metrics._dist_metrics import METRIC_MAPPING64 +from ..metrics.pairwise import _VALID_METRICS, paired_distances +from ..utils import check_array +from ..utils._fast_dict import IntFloatDict +from ..utils._param_validation import ( + HasMethods, + Hidden, + Interval, + StrOptions, + validate_params, +) +from ..utils.graph import _fix_connected_components +from ..utils.validation import check_memory + +# mypy error: Module 'sklearn.cluster' has no attribute '_hierarchical_fast' +from . import _hierarchical_fast as _hierarchical # type: ignore +from ._feature_agglomeration import AgglomerationTransform + +############################################################################### +# For non fully-connected graphs + + +def _fix_connectivity(X, connectivity, affinity): + """ + Fixes the connectivity matrix. + + The different steps are: + + - copies it + - makes it symmetric + - converts it to LIL if necessary + - completes it if necessary. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Feature matrix representing `n_samples` samples to be clustered. + + connectivity : sparse matrix, default=None + Connectivity matrix. Defines for each sample the neighboring samples + following a given structure of the data. 
The matrix is assumed to + be symmetric and only the upper triangular half is used. + Default is `None`, i.e, the Ward algorithm is unstructured. + + affinity : {"euclidean", "precomputed"}, default="euclidean" + Which affinity to use. At the moment `precomputed` and + ``euclidean`` are supported. `euclidean` uses the + negative squared Euclidean distance between points. + + Returns + ------- + connectivity : sparse matrix + The fixed connectivity matrix. + + n_connected_components : int + The number of connected components in the graph. + """ + n_samples = X.shape[0] + if connectivity.shape[0] != n_samples or connectivity.shape[1] != n_samples: + raise ValueError( + "Wrong shape for connectivity matrix: %s when X is %s" + % (connectivity.shape, X.shape) + ) + + # Make the connectivity matrix symmetric: + connectivity = connectivity + connectivity.T + + # Convert connectivity matrix to LIL + if not sparse.issparse(connectivity): + connectivity = sparse.lil_matrix(connectivity) + + # `connectivity` is a sparse matrix at this point + if connectivity.format != "lil": + connectivity = connectivity.tolil() + + # Compute the number of nodes + n_connected_components, labels = connected_components(connectivity) + + if n_connected_components > 1: + warnings.warn( + "the number of connected components of the " + "connectivity matrix is %d > 1. Completing it to avoid " + "stopping the tree early." % n_connected_components, + stacklevel=2, + ) + # XXX: Can we do without completing the matrix? + connectivity = _fix_connected_components( + X=X, + graph=connectivity, + n_connected_components=n_connected_components, + component_labels=labels, + metric=affinity, + mode="connectivity", + ) + + return connectivity, n_connected_components + + +def _single_linkage_tree( + connectivity, + n_samples, + n_nodes, + n_clusters, + n_connected_components, + return_distance, +): + """ + Perform single linkage clustering on sparse data via the minimum + spanning tree from scipy.sparse.csgraph, then using union-find to label. + The parent array is then generated by walking through the tree. 
+ """ + from scipy.sparse.csgraph import minimum_spanning_tree + + # explicitly cast connectivity to ensure safety + connectivity = connectivity.astype(np.float64, copy=False) + + # Ensure zero distances aren't ignored by setting them to "epsilon" + epsilon_value = np.finfo(dtype=connectivity.data.dtype).eps + connectivity.data[connectivity.data == 0] = epsilon_value + + # Use scipy.sparse.csgraph to generate a minimum spanning tree + mst = minimum_spanning_tree(connectivity.tocsr()) + + # Convert the graph to scipy.cluster.hierarchy array format + mst = mst.tocoo() + + # Undo the epsilon values + mst.data[mst.data == epsilon_value] = 0 + + mst_array = np.vstack([mst.row, mst.col, mst.data]).T + + # Sort edges of the min_spanning_tree by weight + mst_array = mst_array[np.argsort(mst_array.T[2], kind="mergesort"), :] + + # Convert edge list into standard hierarchical clustering format + single_linkage_tree = _hierarchical._single_linkage_label(mst_array) + children_ = single_linkage_tree[:, :2].astype(int) + + # Compute parents + parent = np.arange(n_nodes, dtype=np.intp) + for i, (left, right) in enumerate(children_, n_samples): + if n_clusters is not None and i >= n_nodes: + break + if left < n_nodes: + parent[left] = i + if right < n_nodes: + parent[right] = i + + if return_distance: + distances = single_linkage_tree[:, 2] + return children_, n_connected_components, n_samples, parent, distances + return children_, n_connected_components, n_samples, parent + + +############################################################################### +# Hierarchical tree building functions + + +@validate_params( + { + "X": ["array-like"], + "connectivity": ["array-like", "sparse matrix", None], + "n_clusters": [Interval(Integral, 1, None, closed="left"), None], + "return_distance": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def ward_tree(X, *, connectivity=None, n_clusters=None, return_distance=False): + """Ward clustering based on a Feature matrix. + + Recursively merges the pair of clusters that minimally increases + within-cluster variance. + + The inertia matrix uses a Heapq-based representation. + + This is the structured version, that takes into account some topological + structure between samples. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Feature matrix representing `n_samples` samples to be clustered. + + connectivity : {array-like, sparse matrix}, default=None + Connectivity matrix. Defines for each sample the neighboring samples + following a given structure of the data. The matrix is assumed to + be symmetric and only the upper triangular half is used. + Default is None, i.e, the Ward algorithm is unstructured. + + n_clusters : int, default=None + `n_clusters` should be less than `n_samples`. Stop early the + construction of the tree at `n_clusters.` This is useful to decrease + computation time if the number of clusters is not small compared to the + number of samples. In this case, the complete tree is not computed, thus + the 'children' output is of limited use, and the 'parents' output should + rather be used. This option is valid only when specifying a connectivity + matrix. + + return_distance : bool, default=False + If `True`, return the distance between the clusters. + + Returns + ------- + children : ndarray of shape (n_nodes-1, 2) + The children of each non-leaf node. Values less than `n_samples` + correspond to leaves of the tree which are the original samples. 
+ A node `i` greater than or equal to `n_samples` is a non-leaf + node and has children `children_[i - n_samples]`. Alternatively + at the i-th iteration, children[i][0] and children[i][1] + are merged to form node `n_samples + i`. + + n_connected_components : int + The number of connected components in the graph. + + n_leaves : int + The number of leaves in the tree. + + parents : ndarray of shape (n_nodes,) or None + The parent of each node. Only returned when a connectivity matrix + is specified, elsewhere 'None' is returned. + + distances : ndarray of shape (n_nodes-1,) + Only returned if `return_distance` is set to `True` (for compatibility). + The distances between the centers of the nodes. `distances[i]` + corresponds to a weighted Euclidean distance between + the nodes `children[i, 1]` and `children[i, 2]`. If the nodes refer to + leaves of the tree, then `distances[i]` is their unweighted Euclidean + distance. Distances are updated in the following way + (from scipy.hierarchy.linkage): + + The new entry :math:`d(u,v)` is computed as follows, + + .. math:: + + d(u,v) = \\sqrt{\\frac{|v|+|s|} + {T}d(v,s)^2 + + \\frac{|v|+|t|} + {T}d(v,t)^2 + - \\frac{|v|} + {T}d(s,t)^2} + + where :math:`u` is the newly joined cluster consisting of + clusters :math:`s` and :math:`t`, :math:`v` is an unused + cluster in the forest, :math:`T=|v|+|s|+|t|`, and + :math:`|*|` is the cardinality of its argument. This is also + known as the incremental algorithm. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.cluster import ward_tree + >>> X = np.array([[1, 2], [1, 4], [1, 0], + ... [4, 2], [4, 4], [4, 0]]) + >>> children, n_connected_components, n_leaves, parents = ward_tree(X) + >>> children + array([[0, 1], + [3, 5], + [2, 6], + [4, 7], + [8, 9]]) + >>> n_connected_components + 1 + >>> n_leaves + 6 + """ + X = np.asarray(X) + if X.ndim == 1: + X = np.reshape(X, (-1, 1)) + n_samples, n_features = X.shape + + if connectivity is None: + from scipy.cluster import hierarchy # imports PIL + + if n_clusters is not None: + warnings.warn( + ( + "Partial build of the tree is implemented " + "only for structured clustering (i.e. with " + "explicit connectivity). The algorithm " + "will build the full tree and only " + "retain the lower branches required " + "for the specified number of clusters" + ), + stacklevel=2, + ) + X = np.require(X, requirements="W") + out = hierarchy.ward(X) + children_ = out[:, :2].astype(np.intp) + + if return_distance: + distances = out[:, 2] + return children_, 1, n_samples, None, distances + else: + return children_, 1, n_samples, None + + connectivity, n_connected_components = _fix_connectivity( + X, connectivity, affinity="euclidean" + ) + if n_clusters is None: + n_nodes = 2 * n_samples - 1 + else: + if n_clusters > n_samples: + raise ValueError( + "Cannot provide more clusters than samples. " + "%i n_clusters was asked, and there are %i " + "samples." 
% (n_clusters, n_samples) + ) + n_nodes = 2 * n_samples - n_clusters + + # create inertia matrix + coord_row = [] + coord_col = [] + A = [] + for ind, row in enumerate(connectivity.rows): + A.append(row) + # We keep only the upper triangular for the moments + # Generator expressions are faster than arrays on the following + row = [i for i in row if i < ind] + coord_row.extend( + len(row) + * [ + ind, + ] + ) + coord_col.extend(row) + + coord_row = np.array(coord_row, dtype=np.intp, order="C") + coord_col = np.array(coord_col, dtype=np.intp, order="C") + + # build moments as a list + moments_1 = np.zeros(n_nodes, order="C") + moments_1[:n_samples] = 1 + moments_2 = np.zeros((n_nodes, n_features), order="C") + moments_2[:n_samples] = X + inertia = np.empty(len(coord_row), dtype=np.float64, order="C") + _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, inertia) + inertia = list(zip(inertia, coord_row, coord_col)) + heapify(inertia) + + # prepare the main fields + parent = np.arange(n_nodes, dtype=np.intp) + used_node = np.ones(n_nodes, dtype=bool) + children = [] + if return_distance: + distances = np.empty(n_nodes - n_samples) + + not_visited = np.empty(n_nodes, dtype=bool, order="C") + + # recursive merge loop + for k in range(n_samples, n_nodes): + # identify the merge + while True: + inert, i, j = heappop(inertia) + if used_node[i] and used_node[j]: + break + parent[i], parent[j] = k, k + children.append((i, j)) + used_node[i] = used_node[j] = False + if return_distance: # store inertia value + distances[k - n_samples] = inert + + # update the moments + moments_1[k] = moments_1[i] + moments_1[j] + moments_2[k] = moments_2[i] + moments_2[j] + + # update the structure matrix A and the inertia matrix + coord_col = [] + not_visited.fill(1) + not_visited[k] = 0 + _hierarchical._get_parents(A[i], coord_col, parent, not_visited) + _hierarchical._get_parents(A[j], coord_col, parent, not_visited) + # List comprehension is faster than a for loop + [A[col].append(k) for col in coord_col] + A.append(coord_col) + coord_col = np.array(coord_col, dtype=np.intp, order="C") + coord_row = np.empty(coord_col.shape, dtype=np.intp, order="C") + coord_row.fill(k) + n_additions = len(coord_row) + ini = np.empty(n_additions, dtype=np.float64, order="C") + + _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, ini) + + # List comprehension is faster than a for loop + [heappush(inertia, (ini[idx], k, coord_col[idx])) for idx in range(n_additions)] + + # Separate leaves in children (empty lists up to now) + n_leaves = n_samples + # sort children to get consistent output with unstructured version + children = [c[::-1] for c in children] + children = np.array(children) # return numpy array for efficient caching + + if return_distance: + # 2 is scaling factor to compare w/ unstructured version + distances = np.sqrt(2.0 * distances) + return children, n_connected_components, n_leaves, parent, distances + else: + return children, n_connected_components, n_leaves, parent + + +# single average and complete linkage +def linkage_tree( + X, + connectivity=None, + n_clusters=None, + linkage="complete", + affinity="euclidean", + return_distance=False, +): + """Linkage agglomerative clustering based on a Feature matrix. + + The inertia matrix uses a Heapq-based representation. + + This is the structured version, that takes into account some topological + structure between samples. + + Read more in the :ref:`User Guide `. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Feature matrix representing `n_samples` samples to be clustered. + + connectivity : sparse matrix, default=None + Connectivity matrix. Defines for each sample the neighboring samples + following a given structure of the data. The matrix is assumed to + be symmetric and only the upper triangular half is used. + Default is `None`, i.e, the Ward algorithm is unstructured. + + n_clusters : int, default=None + Stop early the construction of the tree at `n_clusters`. This is + useful to decrease computation time if the number of clusters is + not small compared to the number of samples. In this case, the + complete tree is not computed, thus the 'children' output is of + limited use, and the 'parents' output should rather be used. + This option is valid only when specifying a connectivity matrix. + + linkage : {"average", "complete", "single"}, default="complete" + Which linkage criteria to use. The linkage criterion determines which + distance to use between sets of observation. + - "average" uses the average of the distances of each observation of + the two sets. + - "complete" or maximum linkage uses the maximum distances between + all observations of the two sets. + - "single" uses the minimum of the distances between all + observations of the two sets. + + affinity : str or callable, default='euclidean' + Which metric to use. Can be 'euclidean', 'manhattan', or any + distance known to paired distance (see metric.pairwise). + + return_distance : bool, default=False + Whether or not to return the distances between the clusters. + + Returns + ------- + children : ndarray of shape (n_nodes-1, 2) + The children of each non-leaf node. Values less than `n_samples` + correspond to leaves of the tree which are the original samples. + A node `i` greater than or equal to `n_samples` is a non-leaf + node and has children `children_[i - n_samples]`. Alternatively + at the i-th iteration, children[i][0] and children[i][1] + are merged to form node `n_samples + i`. + + n_connected_components : int + The number of connected components in the graph. + + n_leaves : int + The number of leaves in the tree. + + parents : ndarray of shape (n_nodes, ) or None + The parent of each node. Only returned when a connectivity matrix + is specified, elsewhere 'None' is returned. + + distances : ndarray of shape (n_nodes-1,) + Returned when `return_distance` is set to `True`. + + distances[i] refers to the distance between children[i][0] and + children[i][1] when they are merged. + + See Also + -------- + ward_tree : Hierarchical clustering with ward linkage. + """ + X = np.asarray(X) + if X.ndim == 1: + X = np.reshape(X, (-1, 1)) + n_samples, n_features = X.shape + + linkage_choices = { + "complete": _hierarchical.max_merge, + "average": _hierarchical.average_merge, + "single": None, + } # Single linkage is handled differently + try: + join_func = linkage_choices[linkage] + except KeyError as e: + raise ValueError( + "Unknown linkage option, linkage should be one of %s, but %s was given" + % (linkage_choices.keys(), linkage) + ) from e + + if affinity == "cosine" and np.any(~np.any(X, axis=1)): + raise ValueError("Cosine affinity cannot be used when X contains zero vectors") + + if connectivity is None: + from scipy.cluster import hierarchy # imports PIL + + if n_clusters is not None: + warnings.warn( + ( + "Partial build of the tree is implemented " + "only for structured clustering (i.e. with " + "explicit connectivity). 
The algorithm " + "will build the full tree and only " + "retain the lower branches required " + "for the specified number of clusters" + ), + stacklevel=2, + ) + + if affinity == "precomputed": + # for the linkage function of hierarchy to work on precomputed + # data, provide as first argument an ndarray of the shape returned + # by sklearn.metrics.pairwise_distances. + if X.shape[0] != X.shape[1]: + raise ValueError( + f"Distance matrix should be square, got matrix of shape {X.shape}" + ) + i, j = np.triu_indices(X.shape[0], k=1) + X = X[i, j] + elif affinity == "l2": + # Translate to something understood by scipy + affinity = "euclidean" + elif affinity in ("l1", "manhattan"): + affinity = "cityblock" + elif callable(affinity): + X = affinity(X) + i, j = np.triu_indices(X.shape[0], k=1) + X = X[i, j] + if ( + linkage == "single" + and affinity != "precomputed" + and not callable(affinity) + and affinity in METRIC_MAPPING64 + ): + # We need the fast cythonized metric from neighbors + dist_metric = DistanceMetric.get_metric(affinity) + + # The Cython routines used require contiguous arrays + X = np.ascontiguousarray(X, dtype=np.double) + + mst = _hierarchical.mst_linkage_core(X, dist_metric) + # Sort edges of the min_spanning_tree by weight + mst = mst[np.argsort(mst.T[2], kind="mergesort"), :] + + # Convert edge list into standard hierarchical clustering format + out = _hierarchical.single_linkage_label(mst) + else: + out = hierarchy.linkage(X, method=linkage, metric=affinity) + children_ = out[:, :2].astype(int, copy=False) + + if return_distance: + distances = out[:, 2] + return children_, 1, n_samples, None, distances + return children_, 1, n_samples, None + + connectivity, n_connected_components = _fix_connectivity( + X, connectivity, affinity=affinity + ) + connectivity = connectivity.tocoo() + # Put the diagonal to zero + diag_mask = connectivity.row != connectivity.col + connectivity.row = connectivity.row[diag_mask] + connectivity.col = connectivity.col[diag_mask] + connectivity.data = connectivity.data[diag_mask] + del diag_mask + + if affinity == "precomputed": + distances = X[connectivity.row, connectivity.col].astype(np.float64, copy=False) + else: + # FIXME We compute all the distances, while we could have only computed + # the "interesting" distances + distances = paired_distances( + X[connectivity.row], X[connectivity.col], metric=affinity + ) + connectivity.data = distances + + if n_clusters is None: + n_nodes = 2 * n_samples - 1 + else: + assert n_clusters <= n_samples + n_nodes = 2 * n_samples - n_clusters + + if linkage == "single": + return _single_linkage_tree( + connectivity, + n_samples, + n_nodes, + n_clusters, + n_connected_components, + return_distance, + ) + + if return_distance: + distances = np.empty(n_nodes - n_samples) + # create inertia heap and connection matrix + A = np.empty(n_nodes, dtype=object) + inertia = list() + + # LIL seems to the best format to access the rows quickly, + # without the numpy overhead of slicing CSR indices and data. 
+ connectivity = connectivity.tolil() + # We are storing the graph in a list of IntFloatDict + for ind, (data, row) in enumerate(zip(connectivity.data, connectivity.rows)): + A[ind] = IntFloatDict( + np.asarray(row, dtype=np.intp), np.asarray(data, dtype=np.float64) + ) + # We keep only the upper triangular for the heap + # Generator expressions are faster than arrays on the following + inertia.extend( + _hierarchical.WeightedEdge(d, ind, r) for r, d in zip(row, data) if r < ind + ) + del connectivity + + heapify(inertia) + + # prepare the main fields + parent = np.arange(n_nodes, dtype=np.intp) + used_node = np.ones(n_nodes, dtype=np.intp) + children = [] + + # recursive merge loop + for k in range(n_samples, n_nodes): + # identify the merge + while True: + edge = heappop(inertia) + if used_node[edge.a] and used_node[edge.b]: + break + i = edge.a + j = edge.b + + if return_distance: + # store distances + distances[k - n_samples] = edge.weight + + parent[i] = parent[j] = k + children.append((i, j)) + # Keep track of the number of elements per cluster + n_i = used_node[i] + n_j = used_node[j] + used_node[k] = n_i + n_j + used_node[i] = used_node[j] = False + + # update the structure matrix A and the inertia matrix + # a clever 'min', or 'max' operation between A[i] and A[j] + coord_col = join_func(A[i], A[j], used_node, n_i, n_j) + for col, d in coord_col: + A[col].append(k, d) + # Here we use the information from coord_col (containing the + # distances) to update the heap + heappush(inertia, _hierarchical.WeightedEdge(d, k, col)) + A[k] = coord_col + # Clear A[i] and A[j] to save memory + A[i] = A[j] = 0 + + # Separate leaves in children (empty lists up to now) + n_leaves = n_samples + + # # return numpy array for efficient caching + children = np.array(children)[:, ::-1] + + if return_distance: + return children, n_connected_components, n_leaves, parent, distances + return children, n_connected_components, n_leaves, parent + + +# Matching names to tree-building strategies +def _complete_linkage(*args, **kwargs): + kwargs["linkage"] = "complete" + return linkage_tree(*args, **kwargs) + + +def _average_linkage(*args, **kwargs): + kwargs["linkage"] = "average" + return linkage_tree(*args, **kwargs) + + +def _single_linkage(*args, **kwargs): + kwargs["linkage"] = "single" + return linkage_tree(*args, **kwargs) + + +_TREE_BUILDERS = dict( + ward=ward_tree, + complete=_complete_linkage, + average=_average_linkage, + single=_single_linkage, +) + +############################################################################### +# Functions for cutting hierarchical clustering tree + + +def _hc_cut(n_clusters, children, n_leaves): + """Function cutting the ward tree for a given number of clusters. + + Parameters + ---------- + n_clusters : int or ndarray + The number of clusters to form. + + children : ndarray of shape (n_nodes-1, 2) + The children of each non-leaf node. Values less than `n_samples` + correspond to leaves of the tree which are the original samples. + A node `i` greater than or equal to `n_samples` is a non-leaf + node and has children `children_[i - n_samples]`. Alternatively + at the i-th iteration, children[i][0] and children[i][1] + are merged to form node `n_samples + i`. + + n_leaves : int + Number of leaves of the tree. + + Returns + ------- + labels : array [n_samples] + Cluster labels for each point. + """ + if n_clusters > n_leaves: + raise ValueError( + "Cannot extract more clusters than samples: " + "%s clusters where given for a tree with %s leaves." 
+ % (n_clusters, n_leaves) + ) + # In this function, we store nodes as a heap to avoid recomputing + # the max of the nodes: the first element is always the smallest + # We use negated indices as heaps work on smallest elements, and we + # are interested in largest elements + # children[-1] is the root of the tree + nodes = [-(max(children[-1]) + 1)] + for _ in range(n_clusters - 1): + # As we have a heap, nodes[0] is the smallest element + these_children = children[-nodes[0] - n_leaves] + # Insert the 2 children and remove the largest node + heappush(nodes, -these_children[0]) + heappushpop(nodes, -these_children[1]) + label = np.zeros(n_leaves, dtype=np.intp) + for i, node in enumerate(nodes): + label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i + return label + + +############################################################################### + + +class AgglomerativeClustering(ClusterMixin, BaseEstimator): + """ + Agglomerative Clustering. + + Recursively merges pair of clusters of sample data; uses linkage distance. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_clusters : int or None, default=2 + The number of clusters to find. It must be ``None`` if + ``distance_threshold`` is not ``None``. + + metric : str or callable, default="euclidean" + Metric used to compute the linkage. Can be "euclidean", "l1", "l2", + "manhattan", "cosine", or "precomputed". If linkage is "ward", only + "euclidean" is accepted. If "precomputed", a distance matrix is needed + as input for the fit method. + + .. versionadded:: 1.2 + + .. deprecated:: 1.4 + `metric=None` is deprecated in 1.4 and will be removed in 1.6. + Let `metric` be the default value (i.e. `"euclidean"`) instead. + + memory : str or object with the joblib.Memory interface, default=None + Used to cache the output of the computation of the tree. + By default, no caching is done. If a string is given, it is the + path to the caching directory. + + connectivity : array-like or callable, default=None + Connectivity matrix. Defines for each sample the neighboring + samples following a given structure of the data. + This can be a connectivity matrix itself or a callable that transforms + the data into a connectivity matrix, such as derived from + `kneighbors_graph`. Default is ``None``, i.e, the + hierarchical clustering algorithm is unstructured. + + compute_full_tree : 'auto' or bool, default='auto' + Stop early the construction of the tree at ``n_clusters``. This is + useful to decrease computation time if the number of clusters is not + small compared to the number of samples. This option is useful only + when specifying a connectivity matrix. Note also that when varying the + number of clusters and using caching, it may be advantageous to compute + the full tree. It must be ``True`` if ``distance_threshold`` is not + ``None``. By default `compute_full_tree` is "auto", which is equivalent + to `True` when `distance_threshold` is not `None` or that `n_clusters` + is inferior to the maximum between 100 or `0.02 * n_samples`. + Otherwise, "auto" is equivalent to `False`. + + linkage : {'ward', 'complete', 'average', 'single'}, default='ward' + Which linkage criterion to use. The linkage criterion determines which + distance to use between sets of observation. The algorithm will merge + the pairs of cluster that minimize this criterion. + + - 'ward' minimizes the variance of the clusters being merged. + - 'average' uses the average of the distances of each observation of + the two sets. 
+ - 'complete' or 'maximum' linkage uses the maximum distances between + all observations of the two sets. + - 'single' uses the minimum of the distances between all observations + of the two sets. + + .. versionadded:: 0.20 + Added the 'single' option + + distance_threshold : float, default=None + The linkage distance threshold at or above which clusters will not be + merged. If not ``None``, ``n_clusters`` must be ``None`` and + ``compute_full_tree`` must be ``True``. + + .. versionadded:: 0.21 + + compute_distances : bool, default=False + Computes distances between clusters even if `distance_threshold` is not + used. This can be used to make dendrogram visualization, but introduces + a computational and memory overhead. + + .. versionadded:: 0.24 + + Attributes + ---------- + n_clusters_ : int + The number of clusters found by the algorithm. If + ``distance_threshold=None``, it will be equal to the given + ``n_clusters``. + + labels_ : ndarray of shape (n_samples) + Cluster labels for each point. + + n_leaves_ : int + Number of leaves in the hierarchical tree. + + n_connected_components_ : int + The estimated number of connected components in the graph. + + .. versionadded:: 0.21 + ``n_connected_components_`` was added to replace ``n_components_``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + children_ : array-like of shape (n_samples-1, 2) + The children of each non-leaf node. Values less than `n_samples` + correspond to leaves of the tree which are the original samples. + A node `i` greater than or equal to `n_samples` is a non-leaf + node and has children `children_[i - n_samples]`. Alternatively + at the i-th iteration, children[i][0] and children[i][1] + are merged to form node `n_samples + i`. + + distances_ : array-like of shape (n_nodes-1,) + Distances between nodes in the corresponding place in `children_`. + Only computed if `distance_threshold` is used or `compute_distances` + is set to `True`. + + See Also + -------- + FeatureAgglomeration : Agglomerative clustering but for features instead of + samples. + ward_tree : Hierarchical clustering with ward linkage. + + Examples + -------- + >>> from sklearn.cluster import AgglomerativeClustering + >>> import numpy as np + >>> X = np.array([[1, 2], [1, 4], [1, 0], + ... 
[4, 2], [4, 4], [4, 0]]) + >>> clustering = AgglomerativeClustering().fit(X) + >>> clustering + AgglomerativeClustering() + >>> clustering.labels_ + array([1, 1, 1, 0, 0, 0]) + """ + + _parameter_constraints: dict = { + "n_clusters": [Interval(Integral, 1, None, closed="left"), None], + "metric": [ + StrOptions(set(_VALID_METRICS) | {"precomputed"}), + callable, + Hidden(None), + ], + "memory": [str, HasMethods("cache"), None], + "connectivity": ["array-like", callable, None], + "compute_full_tree": [StrOptions({"auto"}), "boolean"], + "linkage": [StrOptions(set(_TREE_BUILDERS.keys()))], + "distance_threshold": [Interval(Real, 0, None, closed="left"), None], + "compute_distances": ["boolean"], + } + + def __init__( + self, + n_clusters=2, + *, + metric="euclidean", + memory=None, + connectivity=None, + compute_full_tree="auto", + linkage="ward", + distance_threshold=None, + compute_distances=False, + ): + self.n_clusters = n_clusters + self.distance_threshold = distance_threshold + self.memory = memory + self.connectivity = connectivity + self.compute_full_tree = compute_full_tree + self.linkage = linkage + self.metric = metric + self.compute_distances = compute_distances + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the hierarchical clustering from features, or distance matrix. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) or \ + (n_samples, n_samples) + Training instances to cluster, or distances between instances if + ``metric='precomputed'``. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Returns the fitted instance. + """ + X = self._validate_data(X, ensure_min_samples=2) + return self._fit(X) + + def _fit(self, X): + """Fit without validation + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples) + Training instances to cluster, or distances between instances if + ``affinity='precomputed'``. + + Returns + ------- + self : object + Returns the fitted instance. + """ + memory = check_memory(self.memory) + + # TODO(1.6): remove in 1.6 + if self.metric is None: + warnings.warn( + ( + "`metric=None` is deprecated in version 1.4 and will be removed in " + "version 1.6. Let `metric` be the default value " + "(i.e. `'euclidean'`) instead." + ), + FutureWarning, + ) + self._metric = "euclidean" + else: + self._metric = self.metric + + if not ((self.n_clusters is None) ^ (self.distance_threshold is None)): + raise ValueError( + "Exactly one of n_clusters and " + "distance_threshold has to be set, and the other " + "needs to be None." + ) + + if self.distance_threshold is not None and not self.compute_full_tree: + raise ValueError( + "compute_full_tree must be True if distance_threshold is set." + ) + + if self.linkage == "ward" and self._metric != "euclidean": + raise ValueError( + f"{self._metric} was provided as metric. Ward can only " + "work with euclidean distances." 
+ ) + + tree_builder = _TREE_BUILDERS[self.linkage] + + connectivity = self.connectivity + if self.connectivity is not None: + if callable(self.connectivity): + connectivity = self.connectivity(X) + connectivity = check_array( + connectivity, accept_sparse=["csr", "coo", "lil"] + ) + + n_samples = len(X) + compute_full_tree = self.compute_full_tree + if self.connectivity is None: + compute_full_tree = True + if compute_full_tree == "auto": + if self.distance_threshold is not None: + compute_full_tree = True + else: + # Early stopping is likely to give a speed up only for + # a large number of clusters. The actual threshold + # implemented here is heuristic + compute_full_tree = self.n_clusters < max(100, 0.02 * n_samples) + n_clusters = self.n_clusters + if compute_full_tree: + n_clusters = None + + # Construct the tree + kwargs = {} + if self.linkage != "ward": + kwargs["linkage"] = self.linkage + kwargs["affinity"] = self._metric + + distance_threshold = self.distance_threshold + + return_distance = (distance_threshold is not None) or self.compute_distances + + out = memory.cache(tree_builder)( + X, + connectivity=connectivity, + n_clusters=n_clusters, + return_distance=return_distance, + **kwargs, + ) + (self.children_, self.n_connected_components_, self.n_leaves_, parents) = out[ + :4 + ] + + if return_distance: + self.distances_ = out[-1] + + if self.distance_threshold is not None: # distance_threshold is used + self.n_clusters_ = ( + np.count_nonzero(self.distances_ >= distance_threshold) + 1 + ) + else: # n_clusters is used + self.n_clusters_ = self.n_clusters + + # Cut the tree + if compute_full_tree: + self.labels_ = _hc_cut(self.n_clusters_, self.children_, self.n_leaves_) + else: + labels = _hierarchical.hc_get_heads(parents, copy=False) + # copy to avoid holding a reference on the original array + labels = np.copy(labels[:n_samples]) + # Reassign cluster numbers + self.labels_ = np.searchsorted(np.unique(labels), labels) + return self + + def fit_predict(self, X, y=None): + """Fit and return the result of each sample's clustering assignment. + + In addition to fitting, this method also return the result of the + clustering assignment for each sample in the training set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or \ + (n_samples, n_samples) + Training instances to cluster, or distances between instances if + ``affinity='precomputed'``. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Cluster labels. + """ + return super().fit_predict(X, y) + + +class FeatureAgglomeration( + ClassNamePrefixFeaturesOutMixin, AgglomerativeClustering, AgglomerationTransform +): + """Agglomerate features. + + Recursively merges pair of clusters of features. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_clusters : int or None, default=2 + The number of clusters to find. It must be ``None`` if + ``distance_threshold`` is not ``None``. + + metric : str or callable, default="euclidean" + Metric used to compute the linkage. Can be "euclidean", "l1", "l2", + "manhattan", "cosine", or "precomputed". If linkage is "ward", only + "euclidean" is accepted. If "precomputed", a distance matrix is needed + as input for the fit method. + + .. versionadded:: 1.2 + + .. deprecated:: 1.4 + `metric=None` is deprecated in 1.4 and will be removed in 1.6. + Let `metric` be the default value (i.e. `"euclidean"`) instead. 
+ + memory : str or object with the joblib.Memory interface, default=None + Used to cache the output of the computation of the tree. + By default, no caching is done. If a string is given, it is the + path to the caching directory. + + connectivity : array-like or callable, default=None + Connectivity matrix. Defines for each feature the neighboring + features following a given structure of the data. + This can be a connectivity matrix itself or a callable that transforms + the data into a connectivity matrix, such as derived from + `kneighbors_graph`. Default is `None`, i.e, the + hierarchical clustering algorithm is unstructured. + + compute_full_tree : 'auto' or bool, default='auto' + Stop early the construction of the tree at `n_clusters`. This is useful + to decrease computation time if the number of clusters is not small + compared to the number of features. This option is useful only when + specifying a connectivity matrix. Note also that when varying the + number of clusters and using caching, it may be advantageous to compute + the full tree. It must be ``True`` if ``distance_threshold`` is not + ``None``. By default `compute_full_tree` is "auto", which is equivalent + to `True` when `distance_threshold` is not `None` or that `n_clusters` + is inferior to the maximum between 100 or `0.02 * n_samples`. + Otherwise, "auto" is equivalent to `False`. + + linkage : {"ward", "complete", "average", "single"}, default="ward" + Which linkage criterion to use. The linkage criterion determines which + distance to use between sets of features. The algorithm will merge + the pairs of cluster that minimize this criterion. + + - "ward" minimizes the variance of the clusters being merged. + - "complete" or maximum linkage uses the maximum distances between + all features of the two sets. + - "average" uses the average of the distances of each feature of + the two sets. + - "single" uses the minimum of the distances between all features + of the two sets. + + pooling_func : callable, default=np.mean + This combines the values of agglomerated features into a single + value, and should accept an array of shape [M, N] and the keyword + argument `axis=1`, and reduce it to an array of size [M]. + + distance_threshold : float, default=None + The linkage distance threshold at or above which clusters will not be + merged. If not ``None``, ``n_clusters`` must be ``None`` and + ``compute_full_tree`` must be ``True``. + + .. versionadded:: 0.21 + + compute_distances : bool, default=False + Computes distances between clusters even if `distance_threshold` is not + used. This can be used to make dendrogram visualization, but introduces + a computational and memory overhead. + + .. versionadded:: 0.24 + + Attributes + ---------- + n_clusters_ : int + The number of clusters found by the algorithm. If + ``distance_threshold=None``, it will be equal to the given + ``n_clusters``. + + labels_ : array-like of (n_features,) + Cluster labels for each feature. + + n_leaves_ : int + Number of leaves in the hierarchical tree. + + n_connected_components_ : int + The estimated number of connected components in the graph. + + .. versionadded:: 0.21 + ``n_connected_components_`` was added to replace ``n_components_``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. 
versionadded:: 1.0 + + children_ : array-like of shape (n_nodes-1, 2) + The children of each non-leaf node. Values less than `n_features` + correspond to leaves of the tree which are the original samples. + A node `i` greater than or equal to `n_features` is a non-leaf + node and has children `children_[i - n_features]`. Alternatively + at the i-th iteration, children[i][0] and children[i][1] + are merged to form node `n_features + i`. + + distances_ : array-like of shape (n_nodes-1,) + Distances between nodes in the corresponding place in `children_`. + Only computed if `distance_threshold` is used or `compute_distances` + is set to `True`. + + See Also + -------- + AgglomerativeClustering : Agglomerative clustering samples instead of + features. + ward_tree : Hierarchical clustering with ward linkage. + + Examples + -------- + >>> import numpy as np + >>> from sklearn import datasets, cluster + >>> digits = datasets.load_digits() + >>> images = digits.images + >>> X = np.reshape(images, (len(images), -1)) + >>> agglo = cluster.FeatureAgglomeration(n_clusters=32) + >>> agglo.fit(X) + FeatureAgglomeration(n_clusters=32) + >>> X_reduced = agglo.transform(X) + >>> X_reduced.shape + (1797, 32) + """ + + _parameter_constraints: dict = { + "n_clusters": [Interval(Integral, 1, None, closed="left"), None], + "metric": [ + StrOptions(set(_VALID_METRICS) | {"precomputed"}), + callable, + Hidden(None), + ], + "memory": [str, HasMethods("cache"), None], + "connectivity": ["array-like", callable, None], + "compute_full_tree": [StrOptions({"auto"}), "boolean"], + "linkage": [StrOptions(set(_TREE_BUILDERS.keys()))], + "pooling_func": [callable], + "distance_threshold": [Interval(Real, 0, None, closed="left"), None], + "compute_distances": ["boolean"], + } + + def __init__( + self, + n_clusters=2, + *, + metric="euclidean", + memory=None, + connectivity=None, + compute_full_tree="auto", + linkage="ward", + pooling_func=np.mean, + distance_threshold=None, + compute_distances=False, + ): + super().__init__( + n_clusters=n_clusters, + memory=memory, + connectivity=connectivity, + compute_full_tree=compute_full_tree, + linkage=linkage, + metric=metric, + distance_threshold=distance_threshold, + compute_distances=compute_distances, + ) + self.pooling_func = pooling_func + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the hierarchical clustering on the data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Returns the transformer. 
+ """ + X = self._validate_data(X, ensure_min_features=2) + super()._fit(X.T) + self._n_features_out = self.n_clusters_ + return self + + @property + def fit_predict(self): + """Fit and return the result of each sample's clustering assignment.""" + raise AttributeError diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_bicluster.py b/venv/lib/python3.10/site-packages/sklearn/cluster/_bicluster.py new file mode 100644 index 0000000000000000000000000000000000000000..65280c06319d99a48f727fc3c3267def7cdc740a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/_bicluster.py @@ -0,0 +1,622 @@ +"""Spectral biclustering algorithms.""" +# Authors : Kemal Eren +# License: BSD 3 clause + +from abc import ABCMeta, abstractmethod +from numbers import Integral + +import numpy as np +from scipy.linalg import norm +from scipy.sparse import dia_matrix, issparse +from scipy.sparse.linalg import eigsh, svds + +from ..base import BaseEstimator, BiclusterMixin, _fit_context +from ..utils import check_random_state, check_scalar +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import make_nonnegative, randomized_svd, safe_sparse_dot +from ..utils.validation import assert_all_finite +from ._kmeans import KMeans, MiniBatchKMeans + +__all__ = ["SpectralCoclustering", "SpectralBiclustering"] + + +def _scale_normalize(X): + """Normalize ``X`` by scaling rows and columns independently. + + Returns the normalized matrix and the row and column scaling + factors. + """ + X = make_nonnegative(X) + row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze() + col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze() + row_diag = np.where(np.isnan(row_diag), 0, row_diag) + col_diag = np.where(np.isnan(col_diag), 0, col_diag) + if issparse(X): + n_rows, n_cols = X.shape + r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows)) + c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols)) + an = r * X * c + else: + an = row_diag[:, np.newaxis] * X * col_diag + return an, row_diag, col_diag + + +def _bistochastic_normalize(X, max_iter=1000, tol=1e-5): + """Normalize rows and columns of ``X`` simultaneously so that all + rows sum to one constant and all columns sum to a different + constant. + """ + # According to paper, this can also be done more efficiently with + # deviation reduction and balancing algorithms. + X = make_nonnegative(X) + X_scaled = X + for _ in range(max_iter): + X_new, _, _ = _scale_normalize(X_scaled) + if issparse(X): + dist = norm(X_scaled.data - X.data) + else: + dist = norm(X_scaled - X_new) + X_scaled = X_new + if dist is not None and dist < tol: + break + return X_scaled + + +def _log_normalize(X): + """Normalize ``X`` according to Kluger's log-interactions scheme.""" + X = make_nonnegative(X, min_value=1) + if issparse(X): + raise ValueError( + "Cannot compute log of a sparse matrix," + " because log(x) diverges to -infinity as x" + " goes to 0." 
+ ) + L = np.log(X) + row_avg = L.mean(axis=1)[:, np.newaxis] + col_avg = L.mean(axis=0) + avg = L.mean() + return L - row_avg - col_avg + avg + + +class BaseSpectral(BiclusterMixin, BaseEstimator, metaclass=ABCMeta): + """Base class for spectral biclustering.""" + + _parameter_constraints: dict = { + "svd_method": [StrOptions({"randomized", "arpack"})], + "n_svd_vecs": [Interval(Integral, 0, None, closed="left"), None], + "mini_batch": ["boolean"], + "init": [StrOptions({"k-means++", "random"}), np.ndarray], + "n_init": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + } + + @abstractmethod + def __init__( + self, + n_clusters=3, + svd_method="randomized", + n_svd_vecs=None, + mini_batch=False, + init="k-means++", + n_init=10, + random_state=None, + ): + self.n_clusters = n_clusters + self.svd_method = svd_method + self.n_svd_vecs = n_svd_vecs + self.mini_batch = mini_batch + self.init = init + self.n_init = n_init + self.random_state = random_state + + @abstractmethod + def _check_parameters(self, n_samples): + """Validate parameters depending on the input data.""" + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Create a biclustering for X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + SpectralBiclustering instance. + """ + X = self._validate_data(X, accept_sparse="csr", dtype=np.float64) + self._check_parameters(X.shape[0]) + self._fit(X) + return self + + def _svd(self, array, n_components, n_discard): + """Returns first `n_components` left and right singular + vectors u and v, discarding the first `n_discard`. + """ + if self.svd_method == "randomized": + kwargs = {} + if self.n_svd_vecs is not None: + kwargs["n_oversamples"] = self.n_svd_vecs + u, _, vt = randomized_svd( + array, n_components, random_state=self.random_state, **kwargs + ) + + elif self.svd_method == "arpack": + u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs) + if np.any(np.isnan(vt)): + # some eigenvalues of A * A.T are negative, causing + # sqrt() to be np.nan. This causes some vectors in vt + # to be np.nan. 
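+ # Recover the affected vectors via an eigendecomposition of the Gram + # matrix array.T @ array, whose eigenvectors are the right singular + # vectors of `array`; the same fallback is applied to `u` below.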
+ A = safe_sparse_dot(array.T, array) + random_state = check_random_state(self.random_state) + # initialize with [-1,1] as in ARPACK + v0 = random_state.uniform(-1, 1, A.shape[0]) + _, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0) + vt = v.T + if np.any(np.isnan(u)): + A = safe_sparse_dot(array, array.T) + random_state = check_random_state(self.random_state) + # initialize with [-1,1] as in ARPACK + v0 = random_state.uniform(-1, 1, A.shape[0]) + _, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0) + + assert_all_finite(u) + assert_all_finite(vt) + u = u[:, n_discard:] + vt = vt[n_discard:] + return u, vt.T + + def _k_means(self, data, n_clusters): + if self.mini_batch: + model = MiniBatchKMeans( + n_clusters, + init=self.init, + n_init=self.n_init, + random_state=self.random_state, + ) + else: + model = KMeans( + n_clusters, + init=self.init, + n_init=self.n_init, + random_state=self.random_state, + ) + model.fit(data) + centroid = model.cluster_centers_ + labels = model.labels_ + return centroid, labels + + def _more_tags(self): + return { + "_xfail_checks": { + "check_estimators_dtypes": "raises nan error", + "check_fit2d_1sample": "_scale_normalize fails", + "check_fit2d_1feature": "raises apply_along_axis error", + "check_estimator_sparse_data": "does not fail gracefully", + "check_methods_subset_invariance": "empty array passed inside", + "check_dont_overwrite_parameters": "empty array passed inside", + "check_fit2d_predict1d": "empty array passed inside", + } + } + + +class SpectralCoclustering(BaseSpectral): + """Spectral Co-Clustering algorithm (Dhillon, 2001). + + Clusters rows and columns of an array `X` to solve the relaxed + normalized cut of the bipartite graph created from `X` as follows: + the edge between row vertex `i` and column vertex `j` has weight + `X[i, j]`. + + The resulting bicluster structure is block-diagonal, since each + row and each column belongs to exactly one bicluster. + + Supports sparse matrices, as long as they are nonnegative. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_clusters : int, default=3 + The number of biclusters to find. + + svd_method : {'randomized', 'arpack'}, default='randomized' + Selects the algorithm for finding singular vectors. May be + 'randomized' or 'arpack'. If 'randomized', use + :func:`sklearn.utils.extmath.randomized_svd`, which may be faster + for large matrices. If 'arpack', use + :func:`scipy.sparse.linalg.svds`, which is more accurate, but + possibly slower in some cases. + + n_svd_vecs : int, default=None + Number of vectors to use in calculating the SVD. Corresponds + to `ncv` when `svd_method=arpack` and `n_oversamples` when + `svd_method` is 'randomized`. + + mini_batch : bool, default=False + Whether to use mini-batch k-means, which is faster but may get + different results. + + init : {'k-means++', 'random'}, or ndarray of shape \ + (n_clusters, n_features), default='k-means++' + Method for initialization of k-means algorithm; defaults to + 'k-means++'. + + n_init : int, default=10 + Number of random initializations that are tried with the + k-means algorithm. + + If mini-batch k-means is used, the best initialization is + chosen and the algorithm runs once. Otherwise, the algorithm + is run for each initialization and the best solution chosen. + + random_state : int, RandomState instance, default=None + Used for randomizing the singular value decomposition and the k-means + initialization. Use an int to make the randomness deterministic. + See :term:`Glossary `. 
+ + Attributes + ---------- + rows_ : array-like of shape (n_row_clusters, n_rows) + Results of the clustering. `rows[i, r]` is True if + cluster `i` contains row `r`. Available only after calling ``fit``. + + columns_ : array-like of shape (n_column_clusters, n_columns) + Results of the clustering, like `rows`. + + row_labels_ : array-like of shape (n_rows,) + The bicluster label of each row. + + column_labels_ : array-like of shape (n_cols,) + The bicluster label of each column. + + biclusters_ : tuple of two ndarrays + The tuple contains the `rows_` and `columns_` arrays. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + SpectralBiclustering : Partitions rows and columns under the assumption + that the data has an underlying checkerboard structure. + + References + ---------- + * :doi:`Dhillon, Inderjit S, 2001. Co-clustering documents and words using + bipartite spectral graph partitioning. + <10.1145/502512.502550>` + + Examples + -------- + >>> from sklearn.cluster import SpectralCoclustering + >>> import numpy as np + >>> X = np.array([[1, 1], [2, 1], [1, 0], + ... [4, 7], [3, 5], [3, 6]]) + >>> clustering = SpectralCoclustering(n_clusters=2, random_state=0).fit(X) + >>> clustering.row_labels_ #doctest: +SKIP + array([0, 1, 1, 0, 0, 0], dtype=int32) + >>> clustering.column_labels_ #doctest: +SKIP + array([0, 0], dtype=int32) + >>> clustering + SpectralCoclustering(n_clusters=2, random_state=0) + """ + + _parameter_constraints: dict = { + **BaseSpectral._parameter_constraints, + "n_clusters": [Interval(Integral, 1, None, closed="left")], + } + + def __init__( + self, + n_clusters=3, + *, + svd_method="randomized", + n_svd_vecs=None, + mini_batch=False, + init="k-means++", + n_init=10, + random_state=None, + ): + super().__init__( + n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state + ) + + def _check_parameters(self, n_samples): + if self.n_clusters > n_samples: + raise ValueError( + f"n_clusters should be <= n_samples={n_samples}. Got" + f" {self.n_clusters} instead." + ) + + def _fit(self, X): + normalized_data, row_diag, col_diag = _scale_normalize(X) + n_sv = 1 + int(np.ceil(np.log2(self.n_clusters))) + u, v = self._svd(normalized_data, n_sv, n_discard=1) + z = np.vstack((row_diag[:, np.newaxis] * u, col_diag[:, np.newaxis] * v)) + + _, labels = self._k_means(z, self.n_clusters) + + n_rows = X.shape[0] + self.row_labels_ = labels[:n_rows] + self.column_labels_ = labels[n_rows:] + + self.rows_ = np.vstack([self.row_labels_ == c for c in range(self.n_clusters)]) + self.columns_ = np.vstack( + [self.column_labels_ == c for c in range(self.n_clusters)] + ) + + +class SpectralBiclustering(BaseSpectral): + """Spectral biclustering (Kluger, 2003). + + Partitions rows and columns under the assumption that the data has + an underlying checkerboard structure. For instance, if there are + two row partitions and three column partitions, each row will + belong to three biclusters, and each column will belong to two + biclusters. The outer product of the corresponding row and column + label vectors gives this checkerboard structure. + + Read more in the :ref:`User Guide `. 
+ + Parameters + ---------- + n_clusters : int or tuple (n_row_clusters, n_column_clusters), default=3 + The number of row and column clusters in the checkerboard + structure. + + method : {'bistochastic', 'scale', 'log'}, default='bistochastic' + Method of normalizing and converting singular vectors into + biclusters. May be one of 'scale', 'bistochastic', or 'log'. + The authors recommend using 'log'. If the data is sparse, + however, log normalization will not work, which is why the + default is 'bistochastic'. + + .. warning:: + if `method='log'`, the data must not be sparse. + + n_components : int, default=6 + Number of singular vectors to check. + + n_best : int, default=3 + Number of best singular vectors to which to project the data + for clustering. + + svd_method : {'randomized', 'arpack'}, default='randomized' + Selects the algorithm for finding singular vectors. May be + 'randomized' or 'arpack'. If 'randomized', uses + :func:`~sklearn.utils.extmath.randomized_svd`, which may be faster + for large matrices. If 'arpack', uses + `scipy.sparse.linalg.svds`, which is more accurate, but + possibly slower in some cases. + + n_svd_vecs : int, default=None + Number of vectors to use in calculating the SVD. Corresponds + to `ncv` when `svd_method=arpack` and `n_oversamples` when + `svd_method` is 'randomized`. + + mini_batch : bool, default=False + Whether to use mini-batch k-means, which is faster but may get + different results. + + init : {'k-means++', 'random'} or ndarray of shape (n_clusters, n_features), \ + default='k-means++' + Method for initialization of k-means algorithm; defaults to + 'k-means++'. + + n_init : int, default=10 + Number of random initializations that are tried with the + k-means algorithm. + + If mini-batch k-means is used, the best initialization is + chosen and the algorithm runs once. Otherwise, the algorithm + is run for each initialization and the best solution chosen. + + random_state : int, RandomState instance, default=None + Used for randomizing the singular value decomposition and the k-means + initialization. Use an int to make the randomness deterministic. + See :term:`Glossary `. + + Attributes + ---------- + rows_ : array-like of shape (n_row_clusters, n_rows) + Results of the clustering. `rows[i, r]` is True if + cluster `i` contains row `r`. Available only after calling ``fit``. + + columns_ : array-like of shape (n_column_clusters, n_columns) + Results of the clustering, like `rows`. + + row_labels_ : array-like of shape (n_rows,) + Row partition labels. + + column_labels_ : array-like of shape (n_cols,) + Column partition labels. + + biclusters_ : tuple of two ndarrays + The tuple contains the `rows_` and `columns_` arrays. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + SpectralCoclustering : Spectral Co-Clustering algorithm (Dhillon, 2001). + + References + ---------- + + * :doi:`Kluger, Yuval, et. al., 2003. Spectral biclustering of microarray + data: coclustering genes and conditions. + <10.1101/gr.648603>` + + Examples + -------- + >>> from sklearn.cluster import SpectralBiclustering + >>> import numpy as np + >>> X = np.array([[1, 1], [2, 1], [1, 0], + ... 
[4, 7], [3, 5], [3, 6]]) + >>> clustering = SpectralBiclustering(n_clusters=2, random_state=0).fit(X) + >>> clustering.row_labels_ + array([1, 1, 1, 0, 0, 0], dtype=int32) + >>> clustering.column_labels_ + array([1, 0], dtype=int32) + >>> clustering + SpectralBiclustering(n_clusters=2, random_state=0) + """ + + _parameter_constraints: dict = { + **BaseSpectral._parameter_constraints, + "n_clusters": [Interval(Integral, 1, None, closed="left"), tuple], + "method": [StrOptions({"bistochastic", "scale", "log"})], + "n_components": [Interval(Integral, 1, None, closed="left")], + "n_best": [Interval(Integral, 1, None, closed="left")], + } + + def __init__( + self, + n_clusters=3, + *, + method="bistochastic", + n_components=6, + n_best=3, + svd_method="randomized", + n_svd_vecs=None, + mini_batch=False, + init="k-means++", + n_init=10, + random_state=None, + ): + super().__init__( + n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state + ) + self.method = method + self.n_components = n_components + self.n_best = n_best + + def _check_parameters(self, n_samples): + if isinstance(self.n_clusters, Integral): + if self.n_clusters > n_samples: + raise ValueError( + f"n_clusters should be <= n_samples={n_samples}. Got" + f" {self.n_clusters} instead." + ) + else: # tuple + try: + n_row_clusters, n_column_clusters = self.n_clusters + check_scalar( + n_row_clusters, + "n_row_clusters", + target_type=Integral, + min_val=1, + max_val=n_samples, + ) + check_scalar( + n_column_clusters, + "n_column_clusters", + target_type=Integral, + min_val=1, + max_val=n_samples, + ) + except (ValueError, TypeError) as e: + raise ValueError( + "Incorrect parameter n_clusters has value:" + f" {self.n_clusters}. It should either be a single integer" + " or an iterable with two integers:" + " (n_row_clusters, n_column_clusters)" + " And the values are should be in the" + " range: (1, n_samples)" + ) from e + + if self.n_best > self.n_components: + raise ValueError( + f"n_best={self.n_best} must be <= n_components={self.n_components}." + ) + + def _fit(self, X): + n_sv = self.n_components + if self.method == "bistochastic": + normalized_data = _bistochastic_normalize(X) + n_sv += 1 + elif self.method == "scale": + normalized_data, _, _ = _scale_normalize(X) + n_sv += 1 + elif self.method == "log": + normalized_data = _log_normalize(X) + n_discard = 0 if self.method == "log" else 1 + u, v = self._svd(normalized_data, n_sv, n_discard) + ut = u.T + vt = v.T + + try: + n_row_clusters, n_col_clusters = self.n_clusters + except TypeError: + n_row_clusters = n_col_clusters = self.n_clusters + + best_ut = self._fit_best_piecewise(ut, self.n_best, n_row_clusters) + + best_vt = self._fit_best_piecewise(vt, self.n_best, n_col_clusters) + + self.row_labels_ = self._project_and_cluster(X, best_vt.T, n_row_clusters) + + self.column_labels_ = self._project_and_cluster(X.T, best_ut.T, n_col_clusters) + + self.rows_ = np.vstack( + [ + self.row_labels_ == label + for label in range(n_row_clusters) + for _ in range(n_col_clusters) + ] + ) + self.columns_ = np.vstack( + [ + self.column_labels_ == label + for _ in range(n_row_clusters) + for label in range(n_col_clusters) + ] + ) + + def _fit_best_piecewise(self, vectors, n_best, n_clusters): + """Find the ``n_best`` vectors that are best approximated by piecewise + constant vectors. + + The piecewise vectors are found by k-means; the best is chosen + according to Euclidean distance. 
+ + """ + + def make_piecewise(v): + centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters) + return centroid[labels].ravel() + + piecewise_vectors = np.apply_along_axis(make_piecewise, axis=1, arr=vectors) + dists = np.apply_along_axis(norm, axis=1, arr=(vectors - piecewise_vectors)) + result = vectors[np.argsort(dists)[:n_best]] + return result + + def _project_and_cluster(self, data, vectors, n_clusters): + """Project ``data`` to ``vectors`` and cluster the result.""" + projected = safe_sparse_dot(data, vectors) + _, labels = self._k_means(projected, n_clusters) + return labels diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_dbscan_inner.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/cluster/_dbscan_inner.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..667db1311faeba2e5913b686209611926526988f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/_dbscan_inner.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__init__.py b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b08ccde8ee5a2a1bc849a57b29200acbb944e5b7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/hdbscan.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/hdbscan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7010d0614633f6f7afc3ae7aeca8edaedfa1bbb9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/__pycache__/hdbscan.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_linkage.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_linkage.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..279891c90c77167e59a3a34a55e5d097ff39865f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_linkage.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_reachability.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_reachability.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0ae697dfa151ee889311e2d8b0bac0d4060227b0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_reachability.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e7c4aae7d1b407bb43d4f6a805b6ac14dd1f19b3 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.pxd b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.pxd new file mode 100644 index 0000000000000000000000000000000000000000..23708b9a38d07884c035b88e260821146075f861 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/_tree.pxd @@ -0,0 +1,49 @@ +# Copyright (c) 2015, Leland McInnes +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: + +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. + +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. + +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from ...utils._typedefs cimport intp_t, float64_t, uint8_t +cimport numpy as cnp + +# This corresponds to the scipy.cluster.hierarchy format +ctypedef packed struct HIERARCHY_t: + intp_t left_node + intp_t right_node + float64_t value + intp_t cluster_size + +# Effectively an edgelist encoding a parent/child pair, along with a value and +# the corresponding cluster_size in each row providing a tree structure. +ctypedef packed struct CONDENSED_t: + intp_t parent + intp_t child + float64_t value + intp_t cluster_size + +cdef extern from "numpy/arrayobject.h": + intp_t * PyArray_SHAPE(cnp.PyArrayObject *) diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/hdbscan.py b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/hdbscan.py new file mode 100644 index 0000000000000000000000000000000000000000..fc51f10cffba01a4fbe8f40398d2e1b9704cf571 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/hdbscan.py @@ -0,0 +1,1018 @@ +""" +HDBSCAN: Hierarchical Density-Based Spatial Clustering + of Applications with Noise +""" +# Authors: Leland McInnes +# Steve Astels +# John Healy +# Meekail Zain +# Copyright (c) 2015, Leland McInnes +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: + +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. + +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. + +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from numbers import Integral, Real +from warnings import warn + +import numpy as np +from scipy.sparse import csgraph, issparse + +from ...base import BaseEstimator, ClusterMixin, _fit_context +from ...metrics import pairwise_distances +from ...metrics._dist_metrics import DistanceMetric +from ...neighbors import BallTree, KDTree, NearestNeighbors +from ...utils._param_validation import Interval, StrOptions +from ...utils.validation import _allclose_dense_sparse, _assert_all_finite +from ._linkage import ( + MST_edge_dtype, + make_single_linkage, + mst_from_data_matrix, + mst_from_mutual_reachability, +) +from ._reachability import mutual_reachability_graph +from ._tree import HIERARCHY_dtype, labelling_at_cut, tree_to_labels + +FAST_METRICS = set(KDTree.valid_metrics + BallTree.valid_metrics) + +# Encodings are arbitrary but must be strictly negative. +# The current encodings are chosen as extensions to the -1 noise label. +# Avoided enums so that the end user only deals with simple labels. +_OUTLIER_ENCODING: dict = { + "infinite": { + "label": -2, + # The probability could also be 1, since infinite points are certainly + # infinite outliers, however 0 is convention from the HDBSCAN library + # implementation. + "prob": 0, + }, + "missing": { + "label": -3, + # A nan probability is chosen to emphasize the fact that the + # corresponding data was not considered in the clustering problem. + "prob": np.nan, + }, +} + + +def _brute_mst(mutual_reachability, min_samples): + """ + Builds a minimum spanning tree (MST) from the provided mutual-reachability + values. This function dispatches to a custom Cython implementation for + dense arrays, and `scipy.sparse.csgraph.minimum_spanning_tree` for sparse + arrays/matrices. + + Parameters + ---------- + mututal_reachability_graph: {ndarray, sparse matrix} of shape \ + (n_samples, n_samples) + Weighted adjacency matrix of the mutual reachability graph. + + min_samples : int, default=None + The number of samples in a neighborhood for a point + to be considered as a core point. This includes the point itself. + + Returns + ------- + mst : ndarray of shape (n_samples - 1,), dtype=MST_edge_dtype + The MST representation of the mutual-reachability graph. The MST is + represented as a collection of edges. 
+ """ + if not issparse(mutual_reachability): + return mst_from_mutual_reachability(mutual_reachability) + + # Check if the mutual reachability matrix has any rows which have + # less than `min_samples` non-zero elements. + indptr = mutual_reachability.indptr + num_points = mutual_reachability.shape[0] + if any((indptr[i + 1] - indptr[i]) < min_samples for i in range(num_points)): + raise ValueError( + f"There exists points with fewer than {min_samples} neighbors. Ensure" + " your distance matrix has non-zero values for at least" + f" `min_sample`={min_samples} neighbors for each points (i.e. K-nn" + " graph), or specify a `max_distance` in `metric_params` to use when" + " distances are missing." + ) + # Check connected component on mutual reachability. + # If more than one connected component is present, + # it means that the graph is disconnected. + n_components = csgraph.connected_components( + mutual_reachability, directed=False, return_labels=False + ) + if n_components > 1: + raise ValueError( + f"Sparse mutual reachability matrix has {n_components} connected" + " components. HDBSCAN cannot be perfomed on a disconnected graph. Ensure" + " that the sparse distance matrix has only one connected component." + ) + + # Compute the minimum spanning tree for the sparse graph + sparse_min_spanning_tree = csgraph.minimum_spanning_tree(mutual_reachability) + rows, cols = sparse_min_spanning_tree.nonzero() + mst = np.rec.fromarrays( + [rows, cols, sparse_min_spanning_tree.data], + dtype=MST_edge_dtype, + ) + return mst + + +def _process_mst(min_spanning_tree): + """ + Builds a single-linkage tree (SLT) from the provided minimum spanning tree + (MST). The MST is first sorted then processed by a custom Cython routine. + + Parameters + ---------- + min_spanning_tree : ndarray of shape (n_samples - 1,), dtype=MST_edge_dtype + The MST representation of the mutual-reachability graph. The MST is + represented as a collection of edges. + + Returns + ------- + single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype + The single-linkage tree tree (dendrogram) built from the MST. + """ + # Sort edges of the min_spanning_tree by weight + row_order = np.argsort(min_spanning_tree["distance"]) + min_spanning_tree = min_spanning_tree[row_order] + # Convert edge list into standard hierarchical clustering format + return make_single_linkage(min_spanning_tree) + + +def _hdbscan_brute( + X, + min_samples=5, + alpha=None, + metric="euclidean", + n_jobs=None, + copy=False, + **metric_params, +): + """ + Builds a single-linkage tree (SLT) from the input data `X`. If + `metric="precomputed"` then `X` must be a symmetric array of distances. + Otherwise, the pairwise distances are calculated directly and passed to + `mutual_reachability_graph`. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples) + Either the raw data from which to compute the pairwise distances, + or the precomputed distances. + + min_samples : int, default=None + The number of samples in a neighborhood for a point + to be considered as a core point. This includes the point itself. + + alpha : float, default=1.0 + A distance scaling parameter as used in robust single linkage. + + metric : str or callable, default='euclidean' + The metric to use when calculating distance between instances in a + feature array. + + - If metric is a string or callable, it must be one of + the options allowed by :func:`~sklearn.metrics.pairwise_distances` + for its metric parameter. 
+ + - If metric is "precomputed", X is assumed to be a distance matrix and + must be square. + + n_jobs : int, default=None + The number of jobs to use for computing the pairwise distances. This + works by breaking down the pairwise matrix into n_jobs even slices and + computing them in parallel. This parameter is passed directly to + :func:`~sklearn.metrics.pairwise_distances`. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + copy : bool, default=False + If `copy=True` then any time an in-place modifications would be made + that would overwrite `X`, a copy will first be made, guaranteeing that + the original data will be unchanged. Currently, it only applies when + `metric="precomputed"`, when passing a dense array or a CSR sparse + array/matrix. + + metric_params : dict, default=None + Arguments passed to the distance metric. + + Returns + ------- + single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype + The single-linkage tree tree (dendrogram) built from the MST. + """ + if metric == "precomputed": + if X.shape[0] != X.shape[1]: + raise ValueError( + "The precomputed distance matrix is expected to be symmetric, however" + f" it has shape {X.shape}. Please verify that the" + " distance matrix was constructed correctly." + ) + if not _allclose_dense_sparse(X, X.T): + raise ValueError( + "The precomputed distance matrix is expected to be symmetric, however" + " its values appear to be asymmetric. Please verify that the distance" + " matrix was constructed correctly." + ) + + distance_matrix = X.copy() if copy else X + else: + distance_matrix = pairwise_distances( + X, metric=metric, n_jobs=n_jobs, **metric_params + ) + distance_matrix /= alpha + + max_distance = metric_params.get("max_distance", 0.0) + if issparse(distance_matrix) and distance_matrix.format != "csr": + # we need CSR format to avoid a conversion in `_brute_mst` when calling + # `csgraph.connected_components` + distance_matrix = distance_matrix.tocsr() + + # Note that `distance_matrix` is manipulated in-place, however we do not + # need it for anything else past this point, hence the operation is safe. + mutual_reachability_ = mutual_reachability_graph( + distance_matrix, min_samples=min_samples, max_distance=max_distance + ) + min_spanning_tree = _brute_mst(mutual_reachability_, min_samples=min_samples) + # Warn if the MST couldn't be constructed around the missing distances + if np.isinf(min_spanning_tree["distance"]).any(): + warn( + ( + "The minimum spanning tree contains edge weights with value " + "infinity. Potentially, you are missing too many distances " + "in the initial distance matrix for the given neighborhood " + "size." + ), + UserWarning, + ) + return _process_mst(min_spanning_tree) + + +def _hdbscan_prims( + X, + algo, + min_samples=5, + alpha=1.0, + metric="euclidean", + leaf_size=40, + n_jobs=None, + **metric_params, +): + """ + Builds a single-linkage tree (SLT) from the input data `X`. If + `metric="precomputed"` then `X` must be a symmetric array of distances. + Otherwise, the pairwise distances are calculated directly and passed to + `mutual_reachability_graph`. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The raw data. + + min_samples : int, default=None + The number of samples in a neighborhood for a point + to be considered as a core point. This includes the point itself. 
+ + alpha : float, default=1.0 + A distance scaling parameter as used in robust single linkage. + + metric : str or callable, default='euclidean' + The metric to use when calculating distance between instances in a + feature array. `metric` must be one of the options allowed by + :func:`~sklearn.metrics.pairwise_distances` for its metric + parameter. + + n_jobs : int, default=None + The number of jobs to use for computing the pairwise distances. This + works by breaking down the pairwise matrix into n_jobs even slices and + computing them in parallel. This parameter is passed directly to + :func:`~sklearn.metrics.pairwise_distances`. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + copy : bool, default=False + If `copy=True` then any time an in-place modifications would be made + that would overwrite `X`, a copy will first be made, guaranteeing that + the original data will be unchanged. Currently, it only applies when + `metric="precomputed"`, when passing a dense array or a CSR sparse + array/matrix. + + metric_params : dict, default=None + Arguments passed to the distance metric. + + Returns + ------- + single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype + The single-linkage tree tree (dendrogram) built from the MST. + """ + # The Cython routines used require contiguous arrays + X = np.asarray(X, order="C") + + # Get distance to kth nearest neighbour + nbrs = NearestNeighbors( + n_neighbors=min_samples, + algorithm=algo, + leaf_size=leaf_size, + metric=metric, + metric_params=metric_params, + n_jobs=n_jobs, + p=None, + ).fit(X) + + neighbors_distances, _ = nbrs.kneighbors(X, min_samples, return_distance=True) + core_distances = np.ascontiguousarray(neighbors_distances[:, -1]) + dist_metric = DistanceMetric.get_metric(metric, **metric_params) + + # Mutual reachability distance is implicit in mst_from_data_matrix + min_spanning_tree = mst_from_data_matrix(X, core_distances, dist_metric, alpha) + return _process_mst(min_spanning_tree) + + +def remap_single_linkage_tree(tree, internal_to_raw, non_finite): + """ + Takes an internal single_linkage_tree structure and adds back in a set of points + that were initially detected as non-finite and returns that new tree. + These points will all be merged into the final node at np.inf distance and + considered noise points. + + Parameters + ---------- + tree : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype + The single-linkage tree tree (dendrogram) built from the MST. 
+ internal_to_raw: dict + A mapping from internal integer index to the raw integer index + non_finite : ndarray + Boolean array of which entries in the raw data are non-finite + """ + finite_count = len(internal_to_raw) + + outlier_count = len(non_finite) + for i, _ in enumerate(tree): + left = tree[i]["left_node"] + right = tree[i]["right_node"] + + if left < finite_count: + tree[i]["left_node"] = internal_to_raw[left] + else: + tree[i]["left_node"] = left + outlier_count + if right < finite_count: + tree[i]["right_node"] = internal_to_raw[right] + else: + tree[i]["right_node"] = right + outlier_count + + outlier_tree = np.zeros(len(non_finite), dtype=HIERARCHY_dtype) + last_cluster_id = max( + tree[tree.shape[0] - 1]["left_node"], tree[tree.shape[0] - 1]["right_node"] + ) + last_cluster_size = tree[tree.shape[0] - 1]["cluster_size"] + for i, outlier in enumerate(non_finite): + outlier_tree[i] = (outlier, last_cluster_id + 1, np.inf, last_cluster_size + 1) + last_cluster_id += 1 + last_cluster_size += 1 + tree = np.concatenate([tree, outlier_tree]) + return tree + + +def _get_finite_row_indices(matrix): + """ + Returns the indices of the purely finite rows of a + sparse matrix or dense ndarray + """ + if issparse(matrix): + row_indices = np.array( + [i for i, row in enumerate(matrix.tolil().data) if np.all(np.isfinite(row))] + ) + else: + (row_indices,) = np.isfinite(matrix.sum(axis=1)).nonzero() + return row_indices + + +class HDBSCAN(ClusterMixin, BaseEstimator): + """Cluster data using hierarchical density-based clustering. + + HDBSCAN - Hierarchical Density-Based Spatial Clustering of Applications + with Noise. Performs :class:`~sklearn.cluster.DBSCAN` over varying epsilon + values and integrates the result to find a clustering that gives the best + stability over epsilon. + This allows HDBSCAN to find clusters of varying densities (unlike + :class:`~sklearn.cluster.DBSCAN`), and be more robust to parameter selection. + Read more in the :ref:`User Guide `. + + For an example of how to use HDBSCAN, as well as a comparison to + :class:`~sklearn.cluster.DBSCAN`, please see the :ref:`plotting demo + `. + + .. versionadded:: 1.3 + + Parameters + ---------- + min_cluster_size : int, default=5 + The minimum number of samples in a group for that group to be + considered a cluster; groupings smaller than this size will be left + as noise. + + min_samples : int, default=None + The number of samples in a neighborhood for a point + to be considered as a core point. This includes the point itself. + When `None`, defaults to `min_cluster_size`. + + cluster_selection_epsilon : float, default=0.0 + A distance threshold. Clusters below this value will be merged. + See [5]_ for more information. + + max_cluster_size : int, default=None + A limit to the size of clusters returned by the `"eom"` cluster + selection algorithm. There is no limit when `max_cluster_size=None`. + Has no effect if `cluster_selection_method="leaf"`. + + metric : str or callable, default='euclidean' + The metric to use when calculating distance between instances in a + feature array. + + - If metric is a string or callable, it must be one of + the options allowed by :func:`~sklearn.metrics.pairwise_distances` + for its metric parameter. + + - If metric is "precomputed", X is assumed to be a distance matrix and + must be square. + + metric_params : dict, default=None + Arguments passed to the distance metric. + + alpha : float, default=1.0 + A distance scaling parameter as used in robust single linkage. 
+ See [3]_ for more information. + + algorithm : {"auto", "brute", "kd_tree", "ball_tree"}, default="auto" + Exactly which algorithm to use for computing core distances; By default + this is set to `"auto"` which attempts to use a + :class:`~sklearn.neighbors.KDTree` tree if possible, otherwise it uses + a :class:`~sklearn.neighbors.BallTree` tree. Both `"kd_tree"` and + `"ball_tree"` algorithms use the + :class:`~sklearn.neighbors.NearestNeighbors` estimator. + + If the `X` passed during `fit` is sparse or `metric` is invalid for + both :class:`~sklearn.neighbors.KDTree` and + :class:`~sklearn.neighbors.BallTree`, then it resolves to use the + `"brute"` algorithm. + + .. deprecated:: 1.4 + The `'kdtree'` option was deprecated in version 1.4, + and will be renamed to `'kd_tree'` in 1.6. + + .. deprecated:: 1.4 + The `'balltree'` option was deprecated in version 1.4, + and will be renamed to `'ball_tree'` in 1.6. + + leaf_size : int, default=40 + Leaf size for trees responsible for fast nearest neighbour queries when + a KDTree or a BallTree are used as core-distance algorithms. A large + dataset size and small `leaf_size` may induce excessive memory usage. + If you are running out of memory consider increasing the `leaf_size` + parameter. Ignored for `algorithm="brute"`. + + n_jobs : int, default=None + Number of jobs to run in parallel to calculate distances. + `None` means 1 unless in a :obj:`joblib.parallel_backend` context. + `-1` means using all processors. See :term:`Glossary ` + for more details. + + cluster_selection_method : {"eom", "leaf"}, default="eom" + The method used to select clusters from the condensed tree. The + standard approach for HDBSCAN* is to use an Excess of Mass (`"eom"`) + algorithm to find the most persistent clusters. Alternatively you can + instead select the clusters at the leaves of the tree -- this provides + the most fine grained and homogeneous clusters. + + allow_single_cluster : bool, default=False + By default HDBSCAN* will not produce a single cluster, setting this + to True will override this and allow single cluster results in + the case that you feel this is a valid result for your dataset. + + store_centers : str, default=None + Which, if any, cluster centers to compute and store. The options are: + + - `None` which does not compute nor store any centers. + - `"centroid"` which calculates the center by taking the weighted + average of their positions. Note that the algorithm uses the + euclidean metric and does not guarantee that the output will be + an observed data point. + - `"medoid"` which calculates the center by taking the point in the + fitted data which minimizes the distance to all other points in + the cluster. This is slower than "centroid" since it requires + computing additional pairwise distances between points of the + same cluster but guarantees the output is an observed data point. + The medoid is also well-defined for arbitrary metrics, and does not + depend on a euclidean metric. + - `"both"` which computes and stores both forms of centers. + + copy : bool, default=False + If `copy=True` then any time an in-place modifications would be made + that would overwrite data passed to :term:`fit`, a copy will first be + made, guaranteeing that the original data will be unchanged. + Currently, it only applies when `metric="precomputed"`, when passing + a dense array or a CSR sparse matrix and when `algorithm="brute"`. 
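# --- Illustrative usage sketch (not part of this patch) ---------------------
# A minimal comparison of the two `cluster_selection_method` options described
# above, run on synthetic blobs. The dataset and parameter values here are
# arbitrary choices for the example, not recommendations.
import numpy as np
from sklearn.cluster import HDBSCAN
from sklearn.datasets import make_blobs

X_demo, _ = make_blobs(n_samples=300, centers=4, cluster_std=0.6, random_state=0)

eom = HDBSCAN(min_cluster_size=10, cluster_selection_method="eom").fit(X_demo)
leaf = HDBSCAN(min_cluster_size=10, cluster_selection_method="leaf",
               store_centers="both").fit(X_demo)

# "leaf" tends to return more, finer-grained clusters than "eom".
print(len(set(eom.labels_)) - (1 if -1 in eom.labels_ else 0))
print(len(set(leaf.labels_)) - (1 if -1 in leaf.labels_ else 0))
print(leaf.centroids_.shape, leaf.medoids_.shape)  # per-cluster centers
# ---------------------------------------------------------------------------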
+ + Attributes + ---------- + labels_ : ndarray of shape (n_samples,) + Cluster labels for each point in the dataset given to :term:`fit`. + Outliers are labeled as follows: + + - Noisy samples are given the label -1. + - Samples with infinite elements (+/- np.inf) are given the label -2. + - Samples with missing data are given the label -3, even if they + also have infinite elements. + + probabilities_ : ndarray of shape (n_samples,) + The strength with which each sample is a member of its assigned + cluster. + + - Clustered samples have probabilities proportional to the degree that + they persist as part of the cluster. + - Noisy samples have probability zero. + - Samples with infinite elements (+/- np.inf) have probability 0. + - Samples with missing data have probability `np.nan`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + centroids_ : ndarray of shape (n_clusters, n_features) + A collection containing the centroid of each cluster calculated under + the standard euclidean metric. The centroids may fall "outside" their + respective clusters if the clusters themselves are non-convex. + + Note that `n_clusters` only counts non-outlier clusters. That is to + say, the `-1, -2, -3` labels for the outlier clusters are excluded. + + medoids_ : ndarray of shape (n_clusters, n_features) + A collection containing the medoid of each cluster calculated under + the whichever metric was passed to the `metric` parameter. The + medoids are points in the original cluster which minimize the average + distance to all other points in that cluster under the chosen metric. + These can be thought of as the result of projecting the `metric`-based + centroid back onto the cluster. + + Note that `n_clusters` only counts non-outlier clusters. That is to + say, the `-1, -2, -3` labels for the outlier clusters are excluded. + + See Also + -------- + DBSCAN : Density-Based Spatial Clustering of Applications + with Noise. + OPTICS : Ordering Points To Identify the Clustering Structure. + Birch : Memory-efficient, online-learning algorithm. + + References + ---------- + + .. [1] :doi:`Campello, R. J., Moulavi, D., & Sander, J. Density-based clustering + based on hierarchical density estimates. + <10.1007/978-3-642-37456-2_14>` + .. [2] :doi:`Campello, R. J., Moulavi, D., Zimek, A., & Sander, J. + Hierarchical density estimates for data clustering, visualization, + and outlier detection.<10.1145/2733381>` + + .. [3] `Chaudhuri, K., & Dasgupta, S. Rates of convergence for the + cluster tree. + `_ + + .. [4] `Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and + Sander, J. Density-Based Clustering Validation. + `_ + + .. [5] :arxiv:`Malzer, C., & Baum, M. "A Hybrid Approach To Hierarchical + Density-based Cluster Selection."<1911.02282>`. 
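# --- Illustrative sketch (not part of this patch) ---------------------------
# The outlier encodings documented under `labels_` / `probabilities_` above:
# rows containing infinite values are labelled -2 (probability 0), rows with
# missing values are labelled -3 (probability NaN), and ordinary noise is -1.
# Toy data below; the exact cluster labels found are incidental.
import numpy as np
from sklearn.cluster import HDBSCAN

X_demo = np.array([[0.0, 0.0], [0.1, 0.0], [0.0, 0.1], [0.1, 0.1],
                   [5.0, 5.0], [5.1, 5.0], [5.0, 5.1], [5.1, 5.1],
                   [np.inf, 0.0],       # expected label -2, probability 0.0
                   [np.nan, 0.0]])      # expected label -3, probability NaN
hdb = HDBSCAN(min_cluster_size=3).fit(X_demo)
print(hdb.labels_[-2:])          # [-2, -3]
print(hdb.probabilities_[-2:])   # [ 0., nan]
# ---------------------------------------------------------------------------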
+ + Examples + -------- + >>> from sklearn.cluster import HDBSCAN + >>> from sklearn.datasets import load_digits + >>> X, _ = load_digits(return_X_y=True) + >>> hdb = HDBSCAN(min_cluster_size=20) + >>> hdb.fit(X) + HDBSCAN(min_cluster_size=20) + >>> hdb.labels_ + array([ 2, 6, -1, ..., -1, -1, -1]) + """ + + _parameter_constraints = { + "min_cluster_size": [Interval(Integral, left=2, right=None, closed="left")], + "min_samples": [Interval(Integral, left=1, right=None, closed="left"), None], + "cluster_selection_epsilon": [ + Interval(Real, left=0, right=None, closed="left") + ], + "max_cluster_size": [ + None, + Interval(Integral, left=1, right=None, closed="left"), + ], + "metric": [StrOptions(FAST_METRICS | {"precomputed"}), callable], + "metric_params": [dict, None], + "alpha": [Interval(Real, left=0, right=None, closed="neither")], + # TODO(1.6): Remove "kdtree" and "balltree" option + "algorithm": [ + StrOptions( + {"auto", "brute", "kd_tree", "ball_tree", "kdtree", "balltree"}, + deprecated={"kdtree", "balltree"}, + ), + ], + "leaf_size": [Interval(Integral, left=1, right=None, closed="left")], + "n_jobs": [Integral, None], + "cluster_selection_method": [StrOptions({"eom", "leaf"})], + "allow_single_cluster": ["boolean"], + "store_centers": [None, StrOptions({"centroid", "medoid", "both"})], + "copy": ["boolean"], + } + + def __init__( + self, + min_cluster_size=5, + min_samples=None, + cluster_selection_epsilon=0.0, + max_cluster_size=None, + metric="euclidean", + metric_params=None, + alpha=1.0, + algorithm="auto", + leaf_size=40, + n_jobs=None, + cluster_selection_method="eom", + allow_single_cluster=False, + store_centers=None, + copy=False, + ): + self.min_cluster_size = min_cluster_size + self.min_samples = min_samples + self.alpha = alpha + self.max_cluster_size = max_cluster_size + self.cluster_selection_epsilon = cluster_selection_epsilon + self.metric = metric + self.metric_params = metric_params + self.algorithm = algorithm + self.leaf_size = leaf_size + self.n_jobs = n_jobs + self.cluster_selection_method = cluster_selection_method + self.allow_single_cluster = allow_single_cluster + self.store_centers = store_centers + self.copy = copy + + @_fit_context( + # HDBSCAN.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Find clusters based on hierarchical density-based clustering. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), or \ + ndarray of shape (n_samples, n_samples) + A feature array, or array of distances between samples if + `metric='precomputed'`. + + y : None + Ignored. + + Returns + ------- + self : object + Returns self. + """ + if self.metric == "precomputed" and self.store_centers is not None: + raise ValueError( + "Cannot store centers when using a precomputed distance matrix." + ) + + self._metric_params = self.metric_params or {} + if self.metric != "precomputed": + # Non-precomputed matrices may contain non-finite values. + X = self._validate_data( + X, + accept_sparse=["csr", "lil"], + force_all_finite=False, + dtype=np.float64, + ) + self._raw_data = X + all_finite = True + try: + _assert_all_finite(X.data if issparse(X) else X) + except ValueError: + all_finite = False + + if not all_finite: + # Pass only the purely finite indices into hdbscan + # We will later assign all non-finite points their + # corresponding labels, as specified in `_OUTLIER_ENCODING` + + # Reduce X to make the checks for missing/outlier samples more + # convenient. 
+ reduced_X = X.sum(axis=1) + + # Samples with missing data are denoted by the presence of + # `np.nan` + missing_index = np.isnan(reduced_X).nonzero()[0] + + # Outlier samples are denoted by the presence of `np.inf` + infinite_index = np.isinf(reduced_X).nonzero()[0] + + # Continue with only finite samples + finite_index = _get_finite_row_indices(X) + internal_to_raw = {x: y for x, y in enumerate(finite_index)} + X = X[finite_index] + elif issparse(X): + # Handle sparse precomputed distance matrices separately + X = self._validate_data( + X, + accept_sparse=["csr", "lil"], + dtype=np.float64, + ) + else: + # Only non-sparse, precomputed distance matrices are handled here + # and thereby allowed to contain numpy.inf for missing distances + + # Perform data validation after removing infinite values (numpy.inf) + # from the given distance matrix. + X = self._validate_data(X, force_all_finite=False, dtype=np.float64) + if np.isnan(X).any(): + # TODO: Support np.nan in Cython implementation for precomputed + # dense HDBSCAN + raise ValueError("np.nan values found in precomputed-dense") + if X.shape[0] == 1: + raise ValueError("n_samples=1 while HDBSCAN requires more than one sample") + self._min_samples = ( + self.min_cluster_size if self.min_samples is None else self.min_samples + ) + + if self._min_samples > X.shape[0]: + raise ValueError( + f"min_samples ({self._min_samples}) must be at most the number of" + f" samples in X ({X.shape[0]})" + ) + + # TODO(1.6): Remove + if self.algorithm == "kdtree": + warn( + ( + "`algorithm='kdtree'`has been deprecated in 1.4 and will be renamed" + " to'kd_tree'`in 1.6. To keep the past behaviour, set" + " `algorithm='kd_tree'`." + ), + FutureWarning, + ) + self.algorithm = "kd_tree" + + # TODO(1.6): Remove + if self.algorithm == "balltree": + warn( + ( + "`algorithm='balltree'`has been deprecated in 1.4 and will be" + " renamed to'ball_tree'`in 1.6. To keep the past behaviour, set" + " `algorithm='ball_tree'`." + ), + FutureWarning, + ) + self.algorithm = "ball_tree" + + mst_func = None + kwargs = dict( + X=X, + min_samples=self._min_samples, + alpha=self.alpha, + metric=self.metric, + n_jobs=self.n_jobs, + **self._metric_params, + ) + if self.algorithm == "kd_tree" and self.metric not in KDTree.valid_metrics: + raise ValueError( + f"{self.metric} is not a valid metric for a KDTree-based algorithm." + " Please select a different metric." + ) + elif ( + self.algorithm == "ball_tree" and self.metric not in BallTree.valid_metrics + ): + raise ValueError( + f"{self.metric} is not a valid metric for a BallTree-based algorithm." + " Please select a different metric." + ) + + if self.algorithm != "auto": + if ( + self.metric != "precomputed" + and issparse(X) + and self.algorithm != "brute" + ): + raise ValueError("Sparse data matrices only support algorithm `brute`.") + + if self.algorithm == "brute": + mst_func = _hdbscan_brute + kwargs["copy"] = self.copy + elif self.algorithm == "kd_tree": + mst_func = _hdbscan_prims + kwargs["algo"] = "kd_tree" + kwargs["leaf_size"] = self.leaf_size + else: + mst_func = _hdbscan_prims + kwargs["algo"] = "ball_tree" + kwargs["leaf_size"] = self.leaf_size + else: + if issparse(X) or self.metric not in FAST_METRICS: + # We can't do much with sparse matrices ... 
+ mst_func = _hdbscan_brute + kwargs["copy"] = self.copy + elif self.metric in KDTree.valid_metrics: + # TODO: Benchmark KD vs Ball Tree efficiency + mst_func = _hdbscan_prims + kwargs["algo"] = "kd_tree" + kwargs["leaf_size"] = self.leaf_size + else: + # Metric is a valid BallTree metric + mst_func = _hdbscan_prims + kwargs["algo"] = "ball_tree" + kwargs["leaf_size"] = self.leaf_size + + self._single_linkage_tree_ = mst_func(**kwargs) + + self.labels_, self.probabilities_ = tree_to_labels( + self._single_linkage_tree_, + self.min_cluster_size, + self.cluster_selection_method, + self.allow_single_cluster, + self.cluster_selection_epsilon, + self.max_cluster_size, + ) + if self.metric != "precomputed" and not all_finite: + # Remap indices to align with original data in the case of + # non-finite entries. Samples with np.inf are mapped to -1 and + # those with np.nan are mapped to -2. + self._single_linkage_tree_ = remap_single_linkage_tree( + self._single_linkage_tree_, + internal_to_raw, + # There may be overlap for points w/ both `np.inf` and `np.nan` + non_finite=set(np.hstack([infinite_index, missing_index])), + ) + new_labels = np.empty(self._raw_data.shape[0], dtype=np.int32) + new_labels[finite_index] = self.labels_ + new_labels[infinite_index] = _OUTLIER_ENCODING["infinite"]["label"] + new_labels[missing_index] = _OUTLIER_ENCODING["missing"]["label"] + self.labels_ = new_labels + + new_probabilities = np.zeros(self._raw_data.shape[0], dtype=np.float64) + new_probabilities[finite_index] = self.probabilities_ + # Infinite outliers have probability 0 by convention, though this + # is arbitrary. + new_probabilities[infinite_index] = _OUTLIER_ENCODING["infinite"]["prob"] + new_probabilities[missing_index] = _OUTLIER_ENCODING["missing"]["prob"] + self.probabilities_ = new_probabilities + + if self.store_centers: + self._weighted_cluster_center(X) + return self + + def fit_predict(self, X, y=None): + """Cluster X and return the associated cluster labels. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), or \ + ndarray of shape (n_samples, n_samples) + A feature array, or array of distances between samples if + `metric='precomputed'`. + + y : None + Ignored. + + Returns + ------- + y : ndarray of shape (n_samples,) + Cluster labels. + """ + self.fit(X) + return self.labels_ + + def _weighted_cluster_center(self, X): + """Calculate and store the centroids/medoids of each cluster. + + This requires `X` to be a raw feature array, not precomputed + distances. Rather than return outputs directly, this helper method + instead stores them in the `self.{centroids, medoids}_` attributes. + The choice for which attributes are calculated and stored is mediated + by the value of `self.store_centers`. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The feature array that the estimator was fit with. + + """ + # Number of non-noise clusters + n_clusters = len(set(self.labels_) - {-1, -2}) + mask = np.empty((X.shape[0],), dtype=np.bool_) + make_centroids = self.store_centers in ("centroid", "both") + make_medoids = self.store_centers in ("medoid", "both") + + if make_centroids: + self.centroids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64) + if make_medoids: + self.medoids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64) + + # Need to handle iteratively seen each cluster may have a different + # number of samples, hence we can't create a homogeneous 3D array. 
+ for idx in range(n_clusters): + mask = self.labels_ == idx + data = X[mask] + strength = self.probabilities_[mask] + if make_centroids: + self.centroids_[idx] = np.average(data, weights=strength, axis=0) + if make_medoids: + # TODO: Implement weighted argmin PWD backend + dist_mat = pairwise_distances( + data, metric=self.metric, **self._metric_params + ) + dist_mat = dist_mat * strength + medoid_index = np.argmin(dist_mat.sum(axis=1)) + self.medoids_[idx] = data[medoid_index] + return + + def dbscan_clustering(self, cut_distance, min_cluster_size=5): + """Return clustering given by DBSCAN without border points. + + Return clustering that would be equivalent to running DBSCAN* for a + particular cut_distance (or epsilon) DBSCAN* can be thought of as + DBSCAN without the border points. As such these results may differ + slightly from `cluster.DBSCAN` due to the difference in implementation + over the non-core points. + + This can also be thought of as a flat clustering derived from constant + height cut through the single linkage tree. + + This represents the result of selecting a cut value for robust single linkage + clustering. The `min_cluster_size` allows the flat clustering to declare noise + points (and cluster smaller than `min_cluster_size`). + + Parameters + ---------- + cut_distance : float + The mutual reachability distance cut value to use to generate a + flat clustering. + + min_cluster_size : int, default=5 + Clusters smaller than this value with be called 'noise' and remain + unclustered in the resulting flat clustering. + + Returns + ------- + labels : ndarray of shape (n_samples,) + An array of cluster labels, one per datapoint. + Outliers are labeled as follows: + + - Noisy samples are given the label -1. + - Samples with infinite elements (+/- np.inf) are given the label -2. + - Samples with missing data are given the label -3, even if they + also have infinite elements. 
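# --- Illustrative sketch (not part of this patch) ---------------------------
# A standalone restatement of the per-cluster center computation in the loop
# above: the centroid is a probability-weighted average of the members, the
# medoid is the member whose probability-weighted summed distance to the other
# members is minimal. `data` and `strength` are sketch-local stand-ins for
# `X[mask]` and `self.probabilities_[mask]`.
import numpy as np
from sklearn.metrics import pairwise_distances

rng = np.random.RandomState(0)
data = rng.randn(30, 2)               # members of one hypothetical cluster
strength = rng.uniform(0.5, 1.0, 30)  # stand-in for the membership strengths

centroid = np.average(data, weights=strength, axis=0)

dist_mat = pairwise_distances(data) * strength
medoid = data[np.argmin(dist_mat.sum(axis=1))]

print(centroid, medoid)
# ---------------------------------------------------------------------------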
+ """ + labels = labelling_at_cut( + self._single_linkage_tree_, cut_distance, min_cluster_size + ) + # Infer indices from labels generated during `fit` + infinite_index = self.labels_ == _OUTLIER_ENCODING["infinite"]["label"] + missing_index = self.labels_ == _OUTLIER_ENCODING["missing"]["label"] + + # Overwrite infinite/missing outlier samples (otherwise simple noise) + labels[infinite_index] = _OUTLIER_ENCODING["infinite"]["label"] + labels[missing_index] = _OUTLIER_ENCODING["missing"]["label"] + return labels + + def _more_tags(self): + return {"allow_nan": self.metric != "precomputed"} diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aecc1171335c01b608b50f703bb2fdc3472c3427 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/test_reachibility.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/test_reachibility.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0faabfd60f89f124f4b53a11cfa728a615ab11f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/__pycache__/test_reachibility.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/test_reachibility.py b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/test_reachibility.py new file mode 100644 index 0000000000000000000000000000000000000000..53096dd7cbec7953e19018e6aeca4e6027c2625b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/_hdbscan/tests/test_reachibility.py @@ -0,0 +1,63 @@ +import numpy as np +import pytest + +from sklearn.cluster._hdbscan._reachability import mutual_reachability_graph +from sklearn.utils._testing import ( + _convert_container, + assert_allclose, +) + + +def test_mutual_reachability_graph_error_sparse_format(): + """Check that we raise an error if the sparse format is not CSR.""" + rng = np.random.RandomState(0) + X = rng.randn(10, 10) + X = X.T @ X + np.fill_diagonal(X, 0.0) + X = _convert_container(X, "sparse_csc") + + err_msg = "Only sparse CSR matrices are supported" + with pytest.raises(ValueError, match=err_msg): + mutual_reachability_graph(X) + + +@pytest.mark.parametrize("array_type", ["array", "sparse_csr"]) +def test_mutual_reachability_graph_inplace(array_type): + """Check that the operation is happening inplace.""" + rng = np.random.RandomState(0) + X = rng.randn(10, 10) + X = X.T @ X + np.fill_diagonal(X, 0.0) + X = _convert_container(X, array_type) + + mr_graph = mutual_reachability_graph(X) + + assert id(mr_graph) == id(X) + + +def test_mutual_reachability_graph_equivalence_dense_sparse(): + """Check that we get the same results for dense and sparse implementation.""" + rng = np.random.RandomState(0) + X = rng.randn(5, 5) + X_dense = X.T @ X + X_sparse = _convert_container(X_dense, "sparse_csr") + + 
mr_graph_dense = mutual_reachability_graph(X_dense, min_samples=3) + mr_graph_sparse = mutual_reachability_graph(X_sparse, min_samples=3) + + assert_allclose(mr_graph_dense, mr_graph_sparse.toarray()) + + +@pytest.mark.parametrize("array_type", ["array", "sparse_csr"]) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_mutual_reachability_graph_preserve_dtype(array_type, dtype): + """Check that the computation preserve dtype thanks to fused types.""" + rng = np.random.RandomState(0) + X = rng.randn(10, 10) + X = (X.T @ X).astype(dtype) + np.fill_diagonal(X, 0.0) + X = _convert_container(X, array_type) + + assert X.dtype == dtype + mr_graph = mutual_reachability_graph(X) + assert mr_graph.dtype == dtype diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..67487cccea374a3bd124ba2166c1e7d7c363e4b4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.pxd b/venv/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.pxd new file mode 100644 index 0000000000000000000000000000000000000000..a10f8c12f34402c872ccc3bd7c14266dcc9b5e7a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/_hierarchical_fast.pxd @@ -0,0 +1,9 @@ +from ..utils._typedefs cimport intp_t + +cdef class UnionFind: + cdef intp_t next_label + cdef intp_t[:] parent + cdef intp_t[:] size + + cdef void union(self, intp_t m, intp_t n) noexcept + cdef intp_t fast_find(self, intp_t n) noexcept diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_kmeans.py b/venv/lib/python3.10/site-packages/sklearn/cluster/_kmeans.py new file mode 100644 index 0000000000000000000000000000000000000000..178242e60be578b9244df87692bd1f74fa4a49a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/_kmeans.py @@ -0,0 +1,2318 @@ +"""K-means clustering.""" + +# Authors: Gael Varoquaux +# Thomas Rueckstiess +# James Bergstra +# Jan Schlueter +# Nelle Varoquaux +# Peter Prettenhofer +# Olivier Grisel +# Mathieu Blondel +# Robert Layton +# License: BSD 3 clause + +import warnings +from abc import ABC, abstractmethod +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + ClusterMixin, + TransformerMixin, + _fit_context, +) +from ..exceptions import ConvergenceWarning +from ..metrics.pairwise import _euclidean_distances, euclidean_distances +from ..utils import check_array, check_random_state +from ..utils._openmp_helpers import _openmp_effective_n_threads +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.extmath import row_norms, stable_cumsum +from ..utils.fixes import threadpool_info, threadpool_limits +from ..utils.sparsefuncs import mean_variance_axis +from ..utils.sparsefuncs_fast import assign_rows_csr +from ..utils.validation import ( + _check_sample_weight, + _is_arraylike_not_scalar, + check_is_fitted, +) +from ._k_means_common import ( + CHUNK_SIZE, + _inertia_dense, + _inertia_sparse, + _is_same_clustering, +) +from ._k_means_elkan import ( + elkan_iter_chunked_dense, + elkan_iter_chunked_sparse, + init_bounds_dense, + init_bounds_sparse, +) 
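# --- Illustrative sketch (not part of this patch) ---------------------------
# A pure-Python analogue of the Cython `UnionFind` declared in the .pxd above
# (a parent array over 2*n - 1 slots, subtree sizes, `union`, and a
# path-compressing `fast_find`). The Cython class is what the clustering code
# actually uses; this sketch only shows the data structure it declares, and
# `UnionFindSketch` is a name local to the example.
import numpy as np


class UnionFindSketch:
    def __init__(self, n):
        # n leaves plus up to n - 1 internal merge nodes.
        self.parent = np.full(2 * n - 1, -1, dtype=np.intp)
        self.size = np.hstack([np.ones(n, dtype=np.intp),
                               np.zeros(n - 1, dtype=np.intp)])
        self.next_label = n

    def union(self, m, n):
        # Merge the two components under a freshly created internal node.
        self.parent[m] = self.parent[n] = self.next_label
        self.size[self.next_label] = self.size[m] + self.size[n]
        self.next_label += 1

    def fast_find(self, n):
        root = n
        while self.parent[root] != -1:
            root = self.parent[root]
        # Path compression: point every node on the walked path at the root.
        while self.parent[n] != -1:
            nxt = self.parent[n]
            self.parent[n] = root
            n = nxt
        return root


uf = UnionFindSketch(4)
uf.union(0, 1)
print(uf.fast_find(0) == uf.fast_find(1))  # True
# ---------------------------------------------------------------------------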
+from ._k_means_lloyd import lloyd_iter_chunked_dense, lloyd_iter_chunked_sparse +from ._k_means_minibatch import _minibatch_update_dense, _minibatch_update_sparse + +############################################################################### +# Initialization heuristic + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "n_clusters": [Interval(Integral, 1, None, closed="left")], + "sample_weight": ["array-like", None], + "x_squared_norms": ["array-like", None], + "random_state": ["random_state"], + "n_local_trials": [Interval(Integral, 1, None, closed="left"), None], + }, + prefer_skip_nested_validation=True, +) +def kmeans_plusplus( + X, + n_clusters, + *, + sample_weight=None, + x_squared_norms=None, + random_state=None, + n_local_trials=None, +): + """Init n_clusters seeds according to k-means++. + + .. versionadded:: 0.24 + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to pick seeds from. + + n_clusters : int + The number of centroids to initialize. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in `X`. If `None`, all observations + are assigned equal weight. `sample_weight` is ignored if `init` + is a callable or a user provided array. + + .. versionadded:: 1.3 + + x_squared_norms : array-like of shape (n_samples,), default=None + Squared Euclidean norm of each data point. + + random_state : int or RandomState instance, default=None + Determines random number generation for centroid initialization. Pass + an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + n_local_trials : int, default=None + The number of seeding trials for each center (except the first), + of which the one reducing inertia the most is greedily chosen. + Set to None to make the number of trials depend logarithmically + on the number of seeds (2+log(k)) which is the recommended setting. + Setting to 1 disables the greedy cluster selection and recovers the + vanilla k-means++ algorithm which was empirically shown to work less + well than its greedy variant. + + Returns + ------- + centers : ndarray of shape (n_clusters, n_features) + The initial centers for k-means. + + indices : ndarray of shape (n_clusters,) + The index location of the chosen centers in the data array X. For a + given index and center, X[index] = center. + + Notes + ----- + Selects initial cluster centers for k-mean clustering in a smart way + to speed up convergence. see: Arthur, D. and Vassilvitskii, S. + "k-means++: the advantages of careful seeding". ACM-SIAM symposium + on Discrete algorithms. 2007 + + Examples + -------- + + >>> from sklearn.cluster import kmeans_plusplus + >>> import numpy as np + >>> X = np.array([[1, 2], [1, 4], [1, 0], + ... [10, 2], [10, 4], [10, 0]]) + >>> centers, indices = kmeans_plusplus(X, n_clusters=2, random_state=0) + >>> centers + array([[10, 2], + [ 1, 0]]) + >>> indices + array([3, 2]) + """ + # Check data + check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32]) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + if X.shape[0] < n_clusters: + raise ValueError( + f"n_samples={X.shape[0]} should be >= n_clusters={n_clusters}." 
+ ) + + # Check parameters + if x_squared_norms is None: + x_squared_norms = row_norms(X, squared=True) + else: + x_squared_norms = check_array(x_squared_norms, dtype=X.dtype, ensure_2d=False) + + if x_squared_norms.shape[0] != X.shape[0]: + raise ValueError( + f"The length of x_squared_norms {x_squared_norms.shape[0]} should " + f"be equal to the length of n_samples {X.shape[0]}." + ) + + random_state = check_random_state(random_state) + + # Call private k-means++ + centers, indices = _kmeans_plusplus( + X, n_clusters, x_squared_norms, sample_weight, random_state, n_local_trials + ) + + return centers, indices + + +def _kmeans_plusplus( + X, n_clusters, x_squared_norms, sample_weight, random_state, n_local_trials=None +): + """Computational component for initialization of n_clusters by + k-means++. Prior validation of data is assumed. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The data to pick seeds for. + + n_clusters : int + The number of seeds to choose. + + sample_weight : ndarray of shape (n_samples,) + The weights for each observation in `X`. + + x_squared_norms : ndarray of shape (n_samples,) + Squared Euclidean norm of each data point. + + random_state : RandomState instance + The generator used to initialize the centers. + See :term:`Glossary `. + + n_local_trials : int, default=None + The number of seeding trials for each center (except the first), + of which the one reducing inertia the most is greedily chosen. + Set to None to make the number of trials depend logarithmically + on the number of seeds (2+log(k)); this is the default. + + Returns + ------- + centers : ndarray of shape (n_clusters, n_features) + The initial centers for k-means. + + indices : ndarray of shape (n_clusters,) + The index location of the chosen centers in the data array X. For a + given index and center, X[index] = center. + """ + n_samples, n_features = X.shape + + centers = np.empty((n_clusters, n_features), dtype=X.dtype) + + # Set the number of local seeding trials if none is given + if n_local_trials is None: + # This is what Arthur/Vassilvitskii tried, but did not report + # specific results for other than mentioning in the conclusion + # that it helped. 
+ n_local_trials = 2 + int(np.log(n_clusters)) + + # Pick first center randomly and track index of point + center_id = random_state.choice(n_samples, p=sample_weight / sample_weight.sum()) + indices = np.full(n_clusters, -1, dtype=int) + if sp.issparse(X): + centers[0] = X[[center_id]].toarray() + else: + centers[0] = X[center_id] + indices[0] = center_id + + # Initialize list of closest distances and calculate current potential + closest_dist_sq = _euclidean_distances( + centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms, squared=True + ) + current_pot = closest_dist_sq @ sample_weight + + # Pick the remaining n_clusters-1 points + for c in range(1, n_clusters): + # Choose center candidates by sampling with probability proportional + # to the squared distance to the closest existing center + rand_vals = random_state.uniform(size=n_local_trials) * current_pot + candidate_ids = np.searchsorted( + stable_cumsum(sample_weight * closest_dist_sq), rand_vals + ) + # XXX: numerical imprecision can result in a candidate_id out of range + np.clip(candidate_ids, None, closest_dist_sq.size - 1, out=candidate_ids) + + # Compute distances to center candidates + distance_to_candidates = _euclidean_distances( + X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True + ) + + # update closest distances squared and potential for each candidate + np.minimum(closest_dist_sq, distance_to_candidates, out=distance_to_candidates) + candidates_pot = distance_to_candidates @ sample_weight.reshape(-1, 1) + + # Decide which candidate is the best + best_candidate = np.argmin(candidates_pot) + current_pot = candidates_pot[best_candidate] + closest_dist_sq = distance_to_candidates[best_candidate] + best_candidate = candidate_ids[best_candidate] + + # Permanently add best center candidate found in local tries + if sp.issparse(X): + centers[c] = X[[best_candidate]].toarray() + else: + centers[c] = X[best_candidate] + indices[c] = best_candidate + + return centers, indices + + +############################################################################### +# K-means batch estimation by EM (expectation maximization) + + +def _tolerance(X, tol): + """Return a tolerance which is dependent on the dataset.""" + if tol == 0: + return 0 + if sp.issparse(X): + variances = mean_variance_axis(X, axis=0)[1] + else: + variances = np.var(X, axis=0) + return np.mean(variances) * tol + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "sample_weight": ["array-like", None], + "return_n_iter": [bool], + }, + prefer_skip_nested_validation=False, +) +def k_means( + X, + n_clusters, + *, + sample_weight=None, + init="k-means++", + n_init="auto", + max_iter=300, + verbose=False, + tol=1e-4, + random_state=None, + copy_x=True, + algorithm="lloyd", + return_n_iter=False, +): + """Perform K-means clustering algorithm. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The observations to cluster. It must be noted that the data + will be converted to C ordering, which will cause a memory copy + if the given data is not C-contiguous. + + n_clusters : int + The number of clusters to form as well as the number of + centroids to generate. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in `X`. If `None`, all observations + are assigned equal weight. `sample_weight` is not used during + initialization if `init` is a callable or a user provided array. 
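# --- Illustrative sketch (not part of this patch) ---------------------------
# Vanilla (non-greedy) k-means++ seeding, i.e. the `n_local_trials=1` case of
# the greedy routine implemented in `_kmeans_plusplus` above: each new center
# is a single draw with probability proportional to the squared distance to
# the nearest center chosen so far. Plain NumPy, unweighted, sketch-local
# names, for clarity only.
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances


def vanilla_kmeans_plusplus(X, n_clusters, random_state=None):
    rng = np.random.RandomState(random_state)
    n_samples = X.shape[0]
    centers = np.empty((n_clusters, X.shape[1]), dtype=X.dtype)
    centers[0] = X[rng.randint(n_samples)]
    closest_dist_sq = euclidean_distances(centers[:1], X, squared=True).ravel()
    for c in range(1, n_clusters):
        probs = closest_dist_sq / closest_dist_sq.sum()
        candidate = rng.choice(n_samples, p=probs)
        centers[c] = X[candidate]
        # Keep, for every sample, the squared distance to its nearest center.
        new_dist_sq = euclidean_distances(centers[c:c + 1], X, squared=True).ravel()
        np.minimum(closest_dist_sq, new_dist_sq, out=closest_dist_sq)
    return centers


rng = np.random.RandomState(0)
X_demo = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 10])
print(vanilla_kmeans_plusplus(X_demo, n_clusters=2, random_state=0))
# ---------------------------------------------------------------------------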
+ + init : {'k-means++', 'random'}, callable or array-like of shape \ + (n_clusters, n_features), default='k-means++' + Method for initialization: + + - `'k-means++'` : selects initial cluster centers for k-mean + clustering in a smart way to speed up convergence. See section + Notes in k_init for more details. + - `'random'`: choose `n_clusters` observations (rows) at random from data + for the initial centroids. + - If an array is passed, it should be of shape `(n_clusters, n_features)` + and gives the initial centers. + - If a callable is passed, it should take arguments `X`, `n_clusters` and a + random state and return an initialization. + + n_init : 'auto' or int, default="auto" + Number of time the k-means algorithm will be run with different + centroid seeds. The final results will be the best output of + n_init consecutive runs in terms of inertia. + + When `n_init='auto'`, the number of runs depends on the value of init: + 10 if using `init='random'` or `init` is a callable; + 1 if using `init='k-means++'` or `init` is an array-like. + + .. versionadded:: 1.2 + Added 'auto' option for `n_init`. + + .. versionchanged:: 1.4 + Default value for `n_init` changed to `'auto'`. + + max_iter : int, default=300 + Maximum number of iterations of the k-means algorithm to run. + + verbose : bool, default=False + Verbosity mode. + + tol : float, default=1e-4 + Relative tolerance with regards to Frobenius norm of the difference + in the cluster centers of two consecutive iterations to declare + convergence. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for centroid initialization. Use + an int to make the randomness deterministic. + See :term:`Glossary `. + + copy_x : bool, default=True + When pre-computing distances it is more numerically accurate to center + the data first. If `copy_x` is True (default), then the original data is + not modified. If False, the original data is modified, and put back + before the function returns, but small numerical differences may be + introduced by subtracting and then adding the data mean. Note that if + the original data is not C-contiguous, a copy will be made even if + `copy_x` is False. If the original data is sparse, but not in CSR format, + a copy will be made even if `copy_x` is False. + + algorithm : {"lloyd", "elkan"}, default="lloyd" + K-means algorithm to use. The classical EM-style algorithm is `"lloyd"`. + The `"elkan"` variation can be more efficient on some datasets with + well-defined clusters, by using the triangle inequality. However it's + more memory intensive due to the allocation of an extra array of shape + `(n_samples, n_clusters)`. + + .. versionchanged:: 0.18 + Added Elkan algorithm + + .. versionchanged:: 1.1 + Renamed "full" to "lloyd", and deprecated "auto" and "full". + Changed "auto" to use "lloyd" instead of "elkan". + + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + Returns + ------- + centroid : ndarray of shape (n_clusters, n_features) + Centroids found at the last iteration of k-means. + + label : ndarray of shape (n_samples,) + The `label[i]` is the code or index of the centroid the + i'th observation is closest to. + + inertia : float + The final value of the inertia criterion (sum of squared distances to + the closest centroid for all observations in the training set). + + best_n_iter : int + Number of iterations corresponding to the best results. + Returned only if `return_n_iter` is set to True. 
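# --- Illustrative sketch (not part of this patch) ---------------------------
# How the relative `tol` documented above becomes an absolute threshold: the
# `_tolerance` helper earlier in this file scales `tol` by the mean per-feature
# variance of X, and a run is declared converged once the summed squared
# center shift of an iteration falls below that value (see the convergence
# checks in the single-run functions further down).
import numpy as np

rng = np.random.RandomState(0)
X_demo = rng.randn(100, 3) * [1.0, 2.0, 5.0]

tol = 1e-4
effective_tol = np.mean(np.var(X_demo, axis=0)) * tol  # dense branch of _tolerance
print(effective_tol)
# ---------------------------------------------------------------------------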
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.cluster import k_means + >>> X = np.array([[1, 2], [1, 4], [1, 0], + ... [10, 2], [10, 4], [10, 0]]) + >>> centroid, label, inertia = k_means( + ... X, n_clusters=2, n_init="auto", random_state=0 + ... ) + >>> centroid + array([[10., 2.], + [ 1., 2.]]) + >>> label + array([1, 1, 1, 0, 0, 0], dtype=int32) + >>> inertia + 16.0 + """ + est = KMeans( + n_clusters=n_clusters, + init=init, + n_init=n_init, + max_iter=max_iter, + verbose=verbose, + tol=tol, + random_state=random_state, + copy_x=copy_x, + algorithm=algorithm, + ).fit(X, sample_weight=sample_weight) + if return_n_iter: + return est.cluster_centers_, est.labels_, est.inertia_, est.n_iter_ + else: + return est.cluster_centers_, est.labels_, est.inertia_ + + +def _kmeans_single_elkan( + X, + sample_weight, + centers_init, + max_iter=300, + verbose=False, + tol=1e-4, + n_threads=1, +): + """A single run of k-means elkan, assumes preparation completed prior. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The observations to cluster. If sparse matrix, must be in CSR format. + + sample_weight : array-like of shape (n_samples,) + The weights for each observation in X. + + centers_init : ndarray of shape (n_clusters, n_features) + The initial centers. + + max_iter : int, default=300 + Maximum number of iterations of the k-means algorithm to run. + + verbose : bool, default=False + Verbosity mode. + + tol : float, default=1e-4 + Relative tolerance with regards to Frobenius norm of the difference + in the cluster centers of two consecutive iterations to declare + convergence. + It's not advised to set `tol=0` since convergence might never be + declared due to rounding errors. Use a very small number instead. + + n_threads : int, default=1 + The number of OpenMP threads to use for the computation. Parallelism is + sample-wise on the main cython loop which assigns each sample to its + closest center. + + Returns + ------- + centroid : ndarray of shape (n_clusters, n_features) + Centroids found at the last iteration of k-means. + + label : ndarray of shape (n_samples,) + label[i] is the code or index of the centroid the + i'th observation is closest to. + + inertia : float + The final value of the inertia criterion (sum of squared distances to + the closest centroid for all observations in the training set). + + n_iter : int + Number of iterations run. + """ + n_samples = X.shape[0] + n_clusters = centers_init.shape[0] + + # Buffers to avoid new allocations at each iteration. 
+ centers = centers_init + centers_new = np.zeros_like(centers) + weight_in_clusters = np.zeros(n_clusters, dtype=X.dtype) + labels = np.full(n_samples, -1, dtype=np.int32) + labels_old = labels.copy() + center_half_distances = euclidean_distances(centers) / 2 + distance_next_center = np.partition( + np.asarray(center_half_distances), kth=1, axis=0 + )[1] + upper_bounds = np.zeros(n_samples, dtype=X.dtype) + lower_bounds = np.zeros((n_samples, n_clusters), dtype=X.dtype) + center_shift = np.zeros(n_clusters, dtype=X.dtype) + + if sp.issparse(X): + init_bounds = init_bounds_sparse + elkan_iter = elkan_iter_chunked_sparse + _inertia = _inertia_sparse + else: + init_bounds = init_bounds_dense + elkan_iter = elkan_iter_chunked_dense + _inertia = _inertia_dense + + init_bounds( + X, + centers, + center_half_distances, + labels, + upper_bounds, + lower_bounds, + n_threads=n_threads, + ) + + strict_convergence = False + + for i in range(max_iter): + elkan_iter( + X, + sample_weight, + centers, + centers_new, + weight_in_clusters, + center_half_distances, + distance_next_center, + upper_bounds, + lower_bounds, + labels, + center_shift, + n_threads, + ) + + # compute new pairwise distances between centers and closest other + # center of each center for next iterations + center_half_distances = euclidean_distances(centers_new) / 2 + distance_next_center = np.partition( + np.asarray(center_half_distances), kth=1, axis=0 + )[1] + + if verbose: + inertia = _inertia(X, sample_weight, centers, labels, n_threads) + print(f"Iteration {i}, inertia {inertia}") + + centers, centers_new = centers_new, centers + + if np.array_equal(labels, labels_old): + # First check the labels for strict convergence. + if verbose: + print(f"Converged at iteration {i}: strict convergence.") + strict_convergence = True + break + else: + # No strict convergence, check for tol based convergence. + center_shift_tot = (center_shift**2).sum() + if center_shift_tot <= tol: + if verbose: + print( + f"Converged at iteration {i}: center shift " + f"{center_shift_tot} within tolerance {tol}." + ) + break + + labels_old[:] = labels + + if not strict_convergence: + # rerun E-step so that predicted labels match cluster centers + elkan_iter( + X, + sample_weight, + centers, + centers, + weight_in_clusters, + center_half_distances, + distance_next_center, + upper_bounds, + lower_bounds, + labels, + center_shift, + n_threads, + update_centers=False, + ) + + inertia = _inertia(X, sample_weight, centers, labels, n_threads) + + return labels, inertia, centers, i + 1 + + +def _kmeans_single_lloyd( + X, + sample_weight, + centers_init, + max_iter=300, + verbose=False, + tol=1e-4, + n_threads=1, +): + """A single run of k-means lloyd, assumes preparation completed prior. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The observations to cluster. If sparse matrix, must be in CSR format. + + sample_weight : ndarray of shape (n_samples,) + The weights for each observation in X. + + centers_init : ndarray of shape (n_clusters, n_features) + The initial centers. + + max_iter : int, default=300 + Maximum number of iterations of the k-means algorithm to run. + + verbose : bool, default=False + Verbosity mode + + tol : float, default=1e-4 + Relative tolerance with regards to Frobenius norm of the difference + in the cluster centers of two consecutive iterations to declare + convergence. + It's not advised to set `tol=0` since convergence might never be + declared due to rounding errors. 
Use a very small number instead. + + n_threads : int, default=1 + The number of OpenMP threads to use for the computation. Parallelism is + sample-wise on the main cython loop which assigns each sample to its + closest center. + + Returns + ------- + centroid : ndarray of shape (n_clusters, n_features) + Centroids found at the last iteration of k-means. + + label : ndarray of shape (n_samples,) + label[i] is the code or index of the centroid the + i'th observation is closest to. + + inertia : float + The final value of the inertia criterion (sum of squared distances to + the closest centroid for all observations in the training set). + + n_iter : int + Number of iterations run. + """ + n_clusters = centers_init.shape[0] + + # Buffers to avoid new allocations at each iteration. + centers = centers_init + centers_new = np.zeros_like(centers) + labels = np.full(X.shape[0], -1, dtype=np.int32) + labels_old = labels.copy() + weight_in_clusters = np.zeros(n_clusters, dtype=X.dtype) + center_shift = np.zeros(n_clusters, dtype=X.dtype) + + if sp.issparse(X): + lloyd_iter = lloyd_iter_chunked_sparse + _inertia = _inertia_sparse + else: + lloyd_iter = lloyd_iter_chunked_dense + _inertia = _inertia_dense + + strict_convergence = False + + # Threadpoolctl context to limit the number of threads in second level of + # nested parallelism (i.e. BLAS) to avoid oversubscription. + with threadpool_limits(limits=1, user_api="blas"): + for i in range(max_iter): + lloyd_iter( + X, + sample_weight, + centers, + centers_new, + weight_in_clusters, + labels, + center_shift, + n_threads, + ) + + if verbose: + inertia = _inertia(X, sample_weight, centers, labels, n_threads) + print(f"Iteration {i}, inertia {inertia}.") + + centers, centers_new = centers_new, centers + + if np.array_equal(labels, labels_old): + # First check the labels for strict convergence. + if verbose: + print(f"Converged at iteration {i}: strict convergence.") + strict_convergence = True + break + else: + # No strict convergence, check for tol based convergence. + center_shift_tot = (center_shift**2).sum() + if center_shift_tot <= tol: + if verbose: + print( + f"Converged at iteration {i}: center shift " + f"{center_shift_tot} within tolerance {tol}." + ) + break + + labels_old[:] = labels + + if not strict_convergence: + # rerun E-step so that predicted labels match cluster centers + lloyd_iter( + X, + sample_weight, + centers, + centers, + weight_in_clusters, + labels, + center_shift, + n_threads, + update_centers=False, + ) + + inertia = _inertia(X, sample_weight, centers, labels, n_threads) + + return labels, inertia, centers, i + 1 + + +def _labels_inertia(X, sample_weight, centers, n_threads=1, return_inertia=True): + """E step of the K-means EM algorithm. + + Compute the labels and the inertia of the given samples and centers. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The input samples to assign to the labels. If sparse matrix, must + be in CSR format. + + sample_weight : ndarray of shape (n_samples,) + The weights for each observation in X. + + x_squared_norms : ndarray of shape (n_samples,) + Precomputed squared euclidean norm of each data point, to speed up + computations. + + centers : ndarray of shape (n_clusters, n_features) + The cluster centers. + + n_threads : int, default=1 + The number of OpenMP threads to use for the computation. Parallelism is + sample-wise on the main cython loop which assigns each sample to its + closest center. 
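# --- Illustrative sketch (not part of this patch) ---------------------------
# One Lloyd iteration in plain NumPy: the E-step assigns every sample to its
# nearest center, the M-step moves each center to the weighted mean of its
# assigned samples. The Cython `lloyd_iter_chunked_*` routines used above do
# the same work chunk-wise with OpenMP; this readable restatement also
# simplifies empty-cluster handling (it just keeps the old center).
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances


def lloyd_step(X, sample_weight, centers):
    distances = euclidean_distances(X, centers, squared=True)
    labels = np.argmin(distances, axis=1)
    inertia = float(np.sum(sample_weight * distances[np.arange(len(X)), labels]))
    new_centers = np.empty_like(centers)
    for k in range(centers.shape[0]):
        mask = labels == k
        if mask.any():
            new_centers[k] = np.average(X[mask], axis=0, weights=sample_weight[mask])
        else:
            new_centers[k] = centers[k]  # simplification; see note above
    return labels, inertia, new_centers


rng = np.random.RandomState(0)
X_demo = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5])
w_demo = np.ones(100)
labels, inertia, centers = lloyd_step(X_demo, w_demo, X_demo[[0, 99]])
print(labels.shape, round(inertia, 2), centers)
# ---------------------------------------------------------------------------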
+ + return_inertia : bool, default=True + Whether to compute and return the inertia. + + Returns + ------- + labels : ndarray of shape (n_samples,) + The resulting assignment. + + inertia : float + Sum of squared distances of samples to their closest cluster center. + Inertia is only returned if return_inertia is True. + """ + n_samples = X.shape[0] + n_clusters = centers.shape[0] + + labels = np.full(n_samples, -1, dtype=np.int32) + center_shift = np.zeros(n_clusters, dtype=centers.dtype) + + if sp.issparse(X): + _labels = lloyd_iter_chunked_sparse + _inertia = _inertia_sparse + else: + _labels = lloyd_iter_chunked_dense + _inertia = _inertia_dense + + _labels( + X, + sample_weight, + centers, + centers_new=None, + weight_in_clusters=None, + labels=labels, + center_shift=center_shift, + n_threads=n_threads, + update_centers=False, + ) + + if return_inertia: + inertia = _inertia(X, sample_weight, centers, labels, n_threads) + return labels, inertia + + return labels + + +def _labels_inertia_threadpool_limit( + X, sample_weight, centers, n_threads=1, return_inertia=True +): + """Same as _labels_inertia but in a threadpool_limits context.""" + with threadpool_limits(limits=1, user_api="blas"): + result = _labels_inertia(X, sample_weight, centers, n_threads, return_inertia) + + return result + + +class _BaseKMeans( + ClassNamePrefixFeaturesOutMixin, TransformerMixin, ClusterMixin, BaseEstimator, ABC +): + """Base class for KMeans and MiniBatchKMeans""" + + _parameter_constraints: dict = { + "n_clusters": [Interval(Integral, 1, None, closed="left")], + "init": [StrOptions({"k-means++", "random"}), callable, "array-like"], + "n_init": [ + StrOptions({"auto"}), + Interval(Integral, 1, None, closed="left"), + ], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left")], + "verbose": ["verbose"], + "random_state": ["random_state"], + } + + def __init__( + self, + n_clusters, + *, + init, + n_init, + max_iter, + tol, + verbose, + random_state, + ): + self.n_clusters = n_clusters + self.init = init + self.max_iter = max_iter + self.tol = tol + self.n_init = n_init + self.verbose = verbose + self.random_state = random_state + + def _check_params_vs_input(self, X, default_n_init=None): + # n_clusters + if X.shape[0] < self.n_clusters: + raise ValueError( + f"n_samples={X.shape[0]} should be >= n_clusters={self.n_clusters}." + ) + + # tol + self._tol = _tolerance(X, self.tol) + + # n-init + if self.n_init == "auto": + if isinstance(self.init, str) and self.init == "k-means++": + self._n_init = 1 + elif isinstance(self.init, str) and self.init == "random": + self._n_init = default_n_init + elif callable(self.init): + self._n_init = default_n_init + else: # array-like + self._n_init = 1 + else: + self._n_init = self.n_init + + if _is_arraylike_not_scalar(self.init) and self._n_init != 1: + warnings.warn( + ( + "Explicit initial center position passed: performing only" + f" one init in {self.__class__.__name__} instead of " + f"n_init={self._n_init}." + ), + RuntimeWarning, + stacklevel=2, + ) + self._n_init = 1 + + @abstractmethod + def _warn_mkl_vcomp(self, n_active_threads): + """Issue an estimator specific warning when vcomp and mkl are both present + + This method is called by `_check_mkl_vcomp`. 
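# --- Illustrative sketch (not part of this patch) ---------------------------
# The `n_init="auto"` resolution performed in `_check_params_vs_input` above,
# restated as a free function: a single run for 'k-means++' or an explicit
# array of centers, `default_n_init` runs (10 for KMeans) for 'random' or a
# callable init. `resolve_n_init` is a name local to this sketch.
def resolve_n_init(init, n_init, default_n_init=10):
    if n_init != "auto":
        return n_init
    if callable(init) or (isinstance(init, str) and init == "random"):
        return default_n_init
    return 1  # 'k-means++' or an array-like of initial centers


print(resolve_n_init("k-means++", "auto"))   # 1
print(resolve_n_init("random", "auto"))      # 10
print(resolve_n_init("k-means++", 5))        # 5
# ---------------------------------------------------------------------------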
+ """ + + def _check_mkl_vcomp(self, X, n_samples): + """Check when vcomp and mkl are both present""" + # The BLAS call inside a prange in lloyd_iter_chunked_dense is known to + # cause a small memory leak when there are less chunks than the number + # of available threads. It only happens when the OpenMP library is + # vcomp (microsoft OpenMP) and the BLAS library is MKL. see #18653 + if sp.issparse(X): + return + + n_active_threads = int(np.ceil(n_samples / CHUNK_SIZE)) + if n_active_threads < self._n_threads: + modules = threadpool_info() + has_vcomp = "vcomp" in [module["prefix"] for module in modules] + has_mkl = ("mkl", "intel") in [ + (module["internal_api"], module.get("threading_layer", None)) + for module in modules + ] + if has_vcomp and has_mkl: + self._warn_mkl_vcomp(n_active_threads) + + def _validate_center_shape(self, X, centers): + """Check if centers is compatible with X and n_clusters.""" + if centers.shape[0] != self.n_clusters: + raise ValueError( + f"The shape of the initial centers {centers.shape} does not " + f"match the number of clusters {self.n_clusters}." + ) + if centers.shape[1] != X.shape[1]: + raise ValueError( + f"The shape of the initial centers {centers.shape} does not " + f"match the number of features of the data {X.shape[1]}." + ) + + def _check_test_data(self, X): + X = self._validate_data( + X, + accept_sparse="csr", + reset=False, + dtype=[np.float64, np.float32], + order="C", + accept_large_sparse=False, + ) + return X + + def _init_centroids( + self, + X, + x_squared_norms, + init, + random_state, + sample_weight, + init_size=None, + n_centroids=None, + ): + """Compute the initial centroids. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The input samples. + + x_squared_norms : ndarray of shape (n_samples,) + Squared euclidean norm of each data point. Pass it if you have it + at hands already to avoid it being recomputed here. + + init : {'k-means++', 'random'}, callable or ndarray of shape \ + (n_clusters, n_features) + Method for initialization. + + random_state : RandomState instance + Determines random number generation for centroid initialization. + See :term:`Glossary `. + + sample_weight : ndarray of shape (n_samples,) + The weights for each observation in X. `sample_weight` is not used + during initialization if `init` is a callable or a user provided + array. + + init_size : int, default=None + Number of samples to randomly sample for speeding up the + initialization (sometimes at the expense of accuracy). + + n_centroids : int, default=None + Number of centroids to initialize. + If left to 'None' the number of centroids will be equal to + number of clusters to form (self.n_clusters). + + Returns + ------- + centers : ndarray of shape (n_clusters, n_features) + Initial centroids of clusters. 
+ """ + n_samples = X.shape[0] + n_clusters = self.n_clusters if n_centroids is None else n_centroids + + if init_size is not None and init_size < n_samples: + init_indices = random_state.randint(0, n_samples, init_size) + X = X[init_indices] + x_squared_norms = x_squared_norms[init_indices] + n_samples = X.shape[0] + sample_weight = sample_weight[init_indices] + + if isinstance(init, str) and init == "k-means++": + centers, _ = _kmeans_plusplus( + X, + n_clusters, + random_state=random_state, + x_squared_norms=x_squared_norms, + sample_weight=sample_weight, + ) + elif isinstance(init, str) and init == "random": + seeds = random_state.choice( + n_samples, + size=n_clusters, + replace=False, + p=sample_weight / sample_weight.sum(), + ) + centers = X[seeds] + elif _is_arraylike_not_scalar(self.init): + centers = init + elif callable(init): + centers = init(X, n_clusters, random_state=random_state) + centers = check_array(centers, dtype=X.dtype, copy=False, order="C") + self._validate_center_shape(X, centers) + + if sp.issparse(centers): + centers = centers.toarray() + + return centers + + def fit_predict(self, X, y=None, sample_weight=None): + """Compute cluster centers and predict cluster index for each sample. + + Convenience method; equivalent to calling fit(X) followed by + predict(X). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data to transform. + + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in X. If None, all observations + are assigned equal weight. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Index of the cluster each sample belongs to. + """ + return self.fit(X, sample_weight=sample_weight).labels_ + + def predict(self, X, sample_weight="deprecated"): + """Predict the closest cluster each sample in X belongs to. + + In the vector quantization literature, `cluster_centers_` is called + the code book and each value returned by `predict` is the index of + the closest code in the code book. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data to predict. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in X. If None, all observations + are assigned equal weight. + + .. deprecated:: 1.3 + The parameter `sample_weight` is deprecated in version 1.3 + and will be removed in 1.5. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Index of the cluster each sample belongs to. + """ + check_is_fitted(self) + + X = self._check_test_data(X) + if not (isinstance(sample_weight, str) and sample_weight == "deprecated"): + warnings.warn( + ( + "'sample_weight' was deprecated in version 1.3 and " + "will be removed in 1.5." + ), + FutureWarning, + ) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + else: + sample_weight = _check_sample_weight(None, X, dtype=X.dtype) + + labels = _labels_inertia_threadpool_limit( + X, + sample_weight, + self.cluster_centers_, + n_threads=self._n_threads, + return_inertia=False, + ) + + return labels + + def fit_transform(self, X, y=None, sample_weight=None): + """Compute clustering and transform X to cluster-distance space. + + Equivalent to fit(X).transform(X), but more efficiently implemented. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data to transform. 
+ + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in X. If None, all observations + are assigned equal weight. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_clusters) + X transformed in the new space. + """ + return self.fit(X, sample_weight=sample_weight)._transform(X) + + def transform(self, X): + """Transform X to a cluster-distance space. + + In the new space, each dimension is the distance to the cluster + centers. Note that even if X is sparse, the array returned by + `transform` will typically be dense. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data to transform. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_clusters) + X transformed in the new space. + """ + check_is_fitted(self) + + X = self._check_test_data(X) + return self._transform(X) + + def _transform(self, X): + """Guts of transform method; no input validation.""" + return euclidean_distances(X, self.cluster_centers_) + + def score(self, X, y=None, sample_weight=None): + """Opposite of the value of X on the K-means objective. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data. + + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in X. If None, all observations + are assigned equal weight. + + Returns + ------- + score : float + Opposite of the value of X on the K-means objective. + """ + check_is_fitted(self) + + X = self._check_test_data(X) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + _, scores = _labels_inertia_threadpool_limit( + X, sample_weight, self.cluster_centers_, self._n_threads + ) + return -scores + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "zero sample_weight is not equivalent to removing samples" + ), + }, + } + + +class KMeans(_BaseKMeans): + """K-Means clustering. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + + n_clusters : int, default=8 + The number of clusters to form as well as the number of + centroids to generate. + + For an example of how to choose an optimal value for `n_clusters` refer to + :ref:`sphx_glr_auto_examples_cluster_plot_kmeans_silhouette_analysis.py`. + + init : {'k-means++', 'random'}, callable or array-like of shape \ + (n_clusters, n_features), default='k-means++' + Method for initialization: + + * 'k-means++' : selects initial cluster centroids using sampling \ + based on an empirical probability distribution of the points' \ + contribution to the overall inertia. This technique speeds up \ + convergence. The algorithm implemented is "greedy k-means++". It \ + differs from the vanilla k-means++ by making several trials at \ + each sampling step and choosing the best centroid among them. + + * 'random': choose `n_clusters` observations (rows) at random from \ + data for the initial centroids. + + * If an array is passed, it should be of shape (n_clusters, n_features)\ + and gives the initial centers. + + * If a callable is passed, it should take arguments X, n_clusters and a\ + random state and return an initialization. + + For an example of how to use the different `init` strategy, see the example + entitled :ref:`sphx_glr_auto_examples_cluster_plot_kmeans_digits.py`. 
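To make the `init` options above concrete, a brief sketch passing an explicit center array and a callable (both on toy data; the callable name is illustrative):

import numpy as np
from sklearn.cluster import KMeans

X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]], dtype=float)

# Array init: shape (n_clusters, n_features); a single run is enough.
km_array = KMeans(n_clusters=2, init=np.array([[1.0, 2.0], [10.0, 2.0]]), n_init=1).fit(X)

# Callable init: takes (X, n_clusters, random_state) and returns initial centers.
def first_rows_init(X, n_clusters, random_state):
    return X[:n_clusters]

km_callable = KMeans(n_clusters=2, init=first_rows_init, n_init=1).fit(X)
print(km_array.cluster_centers_)
print(km_callable.cluster_centers_)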
+ + n_init : 'auto' or int, default='auto' + Number of times the k-means algorithm is run with different centroid + seeds. The final results is the best output of `n_init` consecutive runs + in terms of inertia. Several runs are recommended for sparse + high-dimensional problems (see :ref:`kmeans_sparse_high_dim`). + + When `n_init='auto'`, the number of runs depends on the value of init: + 10 if using `init='random'` or `init` is a callable; + 1 if using `init='k-means++'` or `init` is an array-like. + + .. versionadded:: 1.2 + Added 'auto' option for `n_init`. + + .. versionchanged:: 1.4 + Default value for `n_init` changed to `'auto'`. + + max_iter : int, default=300 + Maximum number of iterations of the k-means algorithm for a + single run. + + tol : float, default=1e-4 + Relative tolerance with regards to Frobenius norm of the difference + in the cluster centers of two consecutive iterations to declare + convergence. + + verbose : int, default=0 + Verbosity mode. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for centroid initialization. Use + an int to make the randomness deterministic. + See :term:`Glossary `. + + copy_x : bool, default=True + When pre-computing distances it is more numerically accurate to center + the data first. If copy_x is True (default), then the original data is + not modified. If False, the original data is modified, and put back + before the function returns, but small numerical differences may be + introduced by subtracting and then adding the data mean. Note that if + the original data is not C-contiguous, a copy will be made even if + copy_x is False. If the original data is sparse, but not in CSR format, + a copy will be made even if copy_x is False. + + algorithm : {"lloyd", "elkan"}, default="lloyd" + K-means algorithm to use. The classical EM-style algorithm is `"lloyd"`. + The `"elkan"` variation can be more efficient on some datasets with + well-defined clusters, by using the triangle inequality. However it's + more memory intensive due to the allocation of an extra array of shape + `(n_samples, n_clusters)`. + + .. versionchanged:: 0.18 + Added Elkan algorithm + + .. versionchanged:: 1.1 + Renamed "full" to "lloyd", and deprecated "auto" and "full". + Changed "auto" to use "lloyd" instead of "elkan". + + Attributes + ---------- + cluster_centers_ : ndarray of shape (n_clusters, n_features) + Coordinates of cluster centers. If the algorithm stops before fully + converging (see ``tol`` and ``max_iter``), these will not be + consistent with ``labels_``. + + labels_ : ndarray of shape (n_samples,) + Labels of each point + + inertia_ : float + Sum of squared distances of samples to their closest cluster center, + weighted by the sample weights if provided. + + n_iter_ : int + Number of iterations run. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + MiniBatchKMeans : Alternative online implementation that does incremental + updates of the centers positions using mini-batches. + For large scale learning (say n_samples > 10k) MiniBatchKMeans is + probably much faster than the default batch implementation. + + Notes + ----- + The k-means problem is solved using either Lloyd's or Elkan's algorithm. 
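A short sketch contrasting the two solvers named in the Notes above; on well-separated blobs both are expected to find the same partition, with Elkan trading extra memory for fewer distance computations (data and seed are arbitrary):

from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import adjusted_rand_score

X, _ = make_blobs(n_samples=500, centers=3, cluster_std=0.6, random_state=0)
lloyd = KMeans(n_clusters=3, algorithm="lloyd", n_init=10, random_state=0).fit(X)
elkan = KMeans(n_clusters=3, algorithm="elkan", n_init=10, random_state=0).fit(X)
print(adjusted_rand_score(lloyd.labels_, elkan.labels_))  # expected ~1.0
print(lloyd.inertia_, elkan.inertia_)                     # essentially identical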
+ + The average complexity is given by O(k n T), where n is the number of + samples and T is the number of iteration. + + The worst case complexity is given by O(n^(k+2/p)) with + n = n_samples, p = n_features. + Refer to :doi:`"How slow is the k-means method?" D. Arthur and S. Vassilvitskii - + SoCG2006.<10.1145/1137856.1137880>` for more details. + + In practice, the k-means algorithm is very fast (one of the fastest + clustering algorithms available), but it falls in local minima. That's why + it can be useful to restart it several times. + + If the algorithm stops before fully converging (because of ``tol`` or + ``max_iter``), ``labels_`` and ``cluster_centers_`` will not be consistent, + i.e. the ``cluster_centers_`` will not be the means of the points in each + cluster. Also, the estimator will reassign ``labels_`` after the last + iteration to make ``labels_`` consistent with ``predict`` on the training + set. + + Examples + -------- + + >>> from sklearn.cluster import KMeans + >>> import numpy as np + >>> X = np.array([[1, 2], [1, 4], [1, 0], + ... [10, 2], [10, 4], [10, 0]]) + >>> kmeans = KMeans(n_clusters=2, random_state=0, n_init="auto").fit(X) + >>> kmeans.labels_ + array([1, 1, 1, 0, 0, 0], dtype=int32) + >>> kmeans.predict([[0, 0], [12, 3]]) + array([1, 0], dtype=int32) + >>> kmeans.cluster_centers_ + array([[10., 2.], + [ 1., 2.]]) + + For a more detailed example of K-Means using the iris dataset see + :ref:`sphx_glr_auto_examples_cluster_plot_cluster_iris.py`. + + For examples of common problems with K-Means and how to address them see + :ref:`sphx_glr_auto_examples_cluster_plot_kmeans_assumptions.py`. + + For an example of how to use K-Means to perform color quantization see + :ref:`sphx_glr_auto_examples_cluster_plot_color_quantization.py`. + + For a demonstration of how K-Means can be used to cluster text documents see + :ref:`sphx_glr_auto_examples_text_plot_document_clustering.py`. + + For a comparison between K-Means and MiniBatchKMeans refer to example + :ref:`sphx_glr_auto_examples_cluster_plot_mini_batch_kmeans.py`. + """ + + _parameter_constraints: dict = { + **_BaseKMeans._parameter_constraints, + "copy_x": ["boolean"], + "algorithm": [StrOptions({"lloyd", "elkan"})], + } + + def __init__( + self, + n_clusters=8, + *, + init="k-means++", + n_init="auto", + max_iter=300, + tol=1e-4, + verbose=0, + random_state=None, + copy_x=True, + algorithm="lloyd", + ): + super().__init__( + n_clusters=n_clusters, + init=init, + n_init=n_init, + max_iter=max_iter, + tol=tol, + verbose=verbose, + random_state=random_state, + ) + + self.copy_x = copy_x + self.algorithm = algorithm + + def _check_params_vs_input(self, X): + super()._check_params_vs_input(X, default_n_init=10) + + self._algorithm = self.algorithm + if self._algorithm == "elkan" and self.n_clusters == 1: + warnings.warn( + ( + "algorithm='elkan' doesn't make sense for a single " + "cluster. Using 'lloyd' instead." + ), + RuntimeWarning, + ) + self._algorithm = "lloyd" + + def _warn_mkl_vcomp(self, n_active_threads): + """Warn when vcomp and mkl are both present""" + warnings.warn( + "KMeans is known to have a memory leak on Windows " + "with MKL, when there are less chunks than available " + "threads. You can avoid it by setting the environment" + f" variable OMP_NUM_THREADS={n_active_threads}." + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None, sample_weight=None): + """Compute k-means clustering. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training instances to cluster. It must be noted that the data + will be converted to C ordering, which will cause a memory + copy if the given data is not C-contiguous. + If a sparse matrix is passed, a copy will be made if it's not in + CSR format. + + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in X. If None, all observations + are assigned equal weight. `sample_weight` is not used during + initialization if `init` is a callable or a user provided array. + + .. versionadded:: 0.20 + + Returns + ------- + self : object + Fitted estimator. + """ + X = self._validate_data( + X, + accept_sparse="csr", + dtype=[np.float64, np.float32], + order="C", + copy=self.copy_x, + accept_large_sparse=False, + ) + + self._check_params_vs_input(X) + + random_state = check_random_state(self.random_state) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + self._n_threads = _openmp_effective_n_threads() + + # Validate init array + init = self.init + init_is_array_like = _is_arraylike_not_scalar(init) + if init_is_array_like: + init = check_array(init, dtype=X.dtype, copy=True, order="C") + self._validate_center_shape(X, init) + + # subtract of mean of x for more accurate distance computations + if not sp.issparse(X): + X_mean = X.mean(axis=0) + # The copy was already done above + X -= X_mean + + if init_is_array_like: + init -= X_mean + + # precompute squared norms of data points + x_squared_norms = row_norms(X, squared=True) + + if self._algorithm == "elkan": + kmeans_single = _kmeans_single_elkan + else: + kmeans_single = _kmeans_single_lloyd + self._check_mkl_vcomp(X, X.shape[0]) + + best_inertia, best_labels = None, None + + for i in range(self._n_init): + # Initialize centers + centers_init = self._init_centroids( + X, + x_squared_norms=x_squared_norms, + init=init, + random_state=random_state, + sample_weight=sample_weight, + ) + if self.verbose: + print("Initialization complete") + + # run a k-means once + labels, inertia, centers, n_iter_ = kmeans_single( + X, + sample_weight, + centers_init, + max_iter=self.max_iter, + verbose=self.verbose, + tol=self._tol, + n_threads=self._n_threads, + ) + + # determine if these results are the best so far + # we chose a new run if it has a better inertia and the clustering is + # different from the best so far (it's possible that the inertia is + # slightly better even if the clustering is the same with potentially + # permuted labels, due to rounding errors) + if best_inertia is None or ( + inertia < best_inertia + and not _is_same_clustering(labels, best_labels, self.n_clusters) + ): + best_labels = labels + best_centers = centers + best_inertia = inertia + best_n_iter = n_iter_ + + if not sp.issparse(X): + if not self.copy_x: + X += X_mean + best_centers += X_mean + + distinct_clusters = len(set(best_labels)) + if distinct_clusters < self.n_clusters: + warnings.warn( + "Number of distinct clusters ({}) found smaller than " + "n_clusters ({}). 
Possibly due to duplicate points " + "in X.".format(distinct_clusters, self.n_clusters), + ConvergenceWarning, + stacklevel=2, + ) + + self.cluster_centers_ = best_centers + self._n_features_out = self.cluster_centers_.shape[0] + self.labels_ = best_labels + self.inertia_ = best_inertia + self.n_iter_ = best_n_iter + return self + + +def _mini_batch_step( + X, + sample_weight, + centers, + centers_new, + weight_sums, + random_state, + random_reassign=False, + reassignment_ratio=0.01, + verbose=False, + n_threads=1, +): + """Incremental update of the centers for the Minibatch K-Means algorithm. + + Parameters + ---------- + + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The original data array. If sparse, must be in CSR format. + + x_squared_norms : ndarray of shape (n_samples,) + Squared euclidean norm of each data point. + + sample_weight : ndarray of shape (n_samples,) + The weights for each observation in `X`. + + centers : ndarray of shape (n_clusters, n_features) + The cluster centers before the current iteration + + centers_new : ndarray of shape (n_clusters, n_features) + The cluster centers after the current iteration. Modified in-place. + + weight_sums : ndarray of shape (n_clusters,) + The vector in which we keep track of the numbers of points in a + cluster. This array is modified in place. + + random_state : RandomState instance + Determines random number generation for low count centers reassignment. + See :term:`Glossary `. + + random_reassign : boolean, default=False + If True, centers with very low counts are randomly reassigned + to observations. + + reassignment_ratio : float, default=0.01 + Control the fraction of the maximum number of counts for a + center to be reassigned. A higher value means that low count + centers are more likely to be reassigned, which means that the + model will take longer to converge, but should converge in a + better clustering. + + verbose : bool, default=False + Controls the verbosity. + + n_threads : int, default=1 + The number of OpenMP threads to use for the computation. + + Returns + ------- + inertia : float + Sum of squared distances of samples to their closest cluster center. + The inertia is computed after finding the labels and before updating + the centers. 
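The docstring above describes one mini-batch step: label the batch against the current centers, then update the centers in place. A pure-NumPy sketch of that idea in the dense case (this is the classic count-based update, not the Cython `_minibatch_update_dense` routine used by the class):

import numpy as np

def minibatch_step_dense(X_batch, centers, weight_sums):
    # 1) assign each batch point to its nearest center (squared Euclidean distance)
    d2 = ((X_batch[:, None, :] - centers[None, :, :]) ** 2).sum(axis=-1)
    labels = d2.argmin(axis=1)
    inertia = d2[np.arange(len(X_batch)), labels].sum()
    # 2) move each touched center toward its batch members with a shrinking rate
    for j in np.unique(labels):
        members = X_batch[labels == j]
        weight_sums[j] += len(members)
        eta = len(members) / weight_sums[j]
        centers[j] = (1 - eta) * centers[j] + eta * members.mean(axis=0)
    return labels, inertia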
+ """ + # Perform label assignment to nearest centers + # For better efficiency, it's better to run _mini_batch_step in a + # threadpool_limit context than using _labels_inertia_threadpool_limit here + labels, inertia = _labels_inertia(X, sample_weight, centers, n_threads=n_threads) + + # Update centers according to the labels + if sp.issparse(X): + _minibatch_update_sparse( + X, sample_weight, centers, centers_new, weight_sums, labels, n_threads + ) + else: + _minibatch_update_dense( + X, + sample_weight, + centers, + centers_new, + weight_sums, + labels, + n_threads, + ) + + # Reassign clusters that have very low weight + if random_reassign and reassignment_ratio > 0: + to_reassign = weight_sums < reassignment_ratio * weight_sums.max() + + # pick at most .5 * batch_size samples as new centers + if to_reassign.sum() > 0.5 * X.shape[0]: + indices_dont_reassign = np.argsort(weight_sums)[int(0.5 * X.shape[0]) :] + to_reassign[indices_dont_reassign] = False + n_reassigns = to_reassign.sum() + + if n_reassigns: + # Pick new clusters amongst observations with uniform probability + new_centers = random_state.choice( + X.shape[0], replace=False, size=n_reassigns + ) + if verbose: + print(f"[MiniBatchKMeans] Reassigning {n_reassigns} cluster centers.") + + if sp.issparse(X): + assign_rows_csr( + X, + new_centers.astype(np.intp, copy=False), + np.where(to_reassign)[0].astype(np.intp, copy=False), + centers_new, + ) + else: + centers_new[to_reassign] = X[new_centers] + + # reset counts of reassigned centers, but don't reset them too small + # to avoid instant reassignment. This is a pretty dirty hack as it + # also modifies the learning rates. + weight_sums[to_reassign] = np.min(weight_sums[~to_reassign]) + + return inertia + + +class MiniBatchKMeans(_BaseKMeans): + """ + Mini-Batch K-Means clustering. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + + n_clusters : int, default=8 + The number of clusters to form as well as the number of + centroids to generate. + + init : {'k-means++', 'random'}, callable or array-like of shape \ + (n_clusters, n_features), default='k-means++' + Method for initialization: + + 'k-means++' : selects initial cluster centroids using sampling based on + an empirical probability distribution of the points' contribution to the + overall inertia. This technique speeds up convergence. The algorithm + implemented is "greedy k-means++". It differs from the vanilla k-means++ + by making several trials at each sampling step and choosing the best centroid + among them. + + 'random': choose `n_clusters` observations (rows) at random from data + for the initial centroids. + + If an array is passed, it should be of shape (n_clusters, n_features) + and gives the initial centers. + + If a callable is passed, it should take arguments X, n_clusters and a + random state and return an initialization. + + max_iter : int, default=100 + Maximum number of iterations over the complete dataset before + stopping independently of any early stopping criterion heuristics. + + batch_size : int, default=1024 + Size of the mini batches. + For faster computations, you can set the ``batch_size`` greater than + 256 * number of cores to enable parallelism on all cores. + + .. versionchanged:: 1.0 + `batch_size` default changed from 100 to 1024. + + verbose : int, default=0 + Verbosity mode. + + compute_labels : bool, default=True + Compute label assignment and inertia for the complete dataset + once the minibatch optimization has converged in fit. 
+ + random_state : int, RandomState instance or None, default=None + Determines random number generation for centroid initialization and + random reassignment. Use an int to make the randomness deterministic. + See :term:`Glossary `. + + tol : float, default=0.0 + Control early stopping based on the relative center changes as + measured by a smoothed, variance-normalized of the mean center + squared position changes. This early stopping heuristics is + closer to the one used for the batch variant of the algorithms + but induces a slight computational and memory overhead over the + inertia heuristic. + + To disable convergence detection based on normalized center + change, set tol to 0.0 (default). + + max_no_improvement : int, default=10 + Control early stopping based on the consecutive number of mini + batches that does not yield an improvement on the smoothed inertia. + + To disable convergence detection based on inertia, set + max_no_improvement to None. + + init_size : int, default=None + Number of samples to randomly sample for speeding up the + initialization (sometimes at the expense of accuracy): the + only algorithm is initialized by running a batch KMeans on a + random subset of the data. This needs to be larger than n_clusters. + + If `None`, the heuristic is `init_size = 3 * batch_size` if + `3 * batch_size < n_clusters`, else `init_size = 3 * n_clusters`. + + n_init : 'auto' or int, default="auto" + Number of random initializations that are tried. + In contrast to KMeans, the algorithm is only run once, using the best of + the `n_init` initializations as measured by inertia. Several runs are + recommended for sparse high-dimensional problems (see + :ref:`kmeans_sparse_high_dim`). + + When `n_init='auto'`, the number of runs depends on the value of init: + 3 if using `init='random'` or `init` is a callable; + 1 if using `init='k-means++'` or `init` is an array-like. + + .. versionadded:: 1.2 + Added 'auto' option for `n_init`. + + .. versionchanged:: 1.4 + Default value for `n_init` changed to `'auto'` in version. + + reassignment_ratio : float, default=0.01 + Control the fraction of the maximum number of counts for a center to + be reassigned. A higher value means that low count centers are more + easily reassigned, which means that the model will take longer to + converge, but should converge in a better clustering. However, too high + a value may cause convergence issues, especially with a small batch + size. + + Attributes + ---------- + + cluster_centers_ : ndarray of shape (n_clusters, n_features) + Coordinates of cluster centers. + + labels_ : ndarray of shape (n_samples,) + Labels of each point (if compute_labels is set to True). + + inertia_ : float + The value of the inertia criterion associated with the chosen + partition if compute_labels is set to True. If compute_labels is set to + False, it's an approximation of the inertia based on an exponentially + weighted average of the batch inertiae. + The inertia is defined as the sum of square distances of samples to + their cluster center, weighted by the sample weights if provided. + + n_iter_ : int + Number of iterations over the full dataset. + + n_steps_ : int + Number of minibatches processed. + + .. versionadded:: 1.0 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. 
versionadded:: 1.0 + + See Also + -------- + KMeans : The classic implementation of the clustering method based on the + Lloyd's algorithm. It consumes the whole set of input data at each + iteration. + + Notes + ----- + See https://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf + + When there are too few points in the dataset, some centers may be + duplicated, which means that a proper clustering in terms of the number + of requesting clusters and the number of returned clusters will not + always match. One solution is to set `reassignment_ratio=0`, which + prevents reassignments of clusters that are too small. + + Examples + -------- + >>> from sklearn.cluster import MiniBatchKMeans + >>> import numpy as np + >>> X = np.array([[1, 2], [1, 4], [1, 0], + ... [4, 2], [4, 0], [4, 4], + ... [4, 5], [0, 1], [2, 2], + ... [3, 2], [5, 5], [1, -1]]) + >>> # manually fit on batches + >>> kmeans = MiniBatchKMeans(n_clusters=2, + ... random_state=0, + ... batch_size=6, + ... n_init="auto") + >>> kmeans = kmeans.partial_fit(X[0:6,:]) + >>> kmeans = kmeans.partial_fit(X[6:12,:]) + >>> kmeans.cluster_centers_ + array([[3.375, 3. ], + [0.75 , 0.5 ]]) + >>> kmeans.predict([[0, 0], [4, 4]]) + array([1, 0], dtype=int32) + >>> # fit on the whole data + >>> kmeans = MiniBatchKMeans(n_clusters=2, + ... random_state=0, + ... batch_size=6, + ... max_iter=10, + ... n_init="auto").fit(X) + >>> kmeans.cluster_centers_ + array([[3.55102041, 2.48979592], + [1.06896552, 1. ]]) + >>> kmeans.predict([[0, 0], [4, 4]]) + array([1, 0], dtype=int32) + """ + + _parameter_constraints: dict = { + **_BaseKMeans._parameter_constraints, + "batch_size": [Interval(Integral, 1, None, closed="left")], + "compute_labels": ["boolean"], + "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None], + "init_size": [Interval(Integral, 1, None, closed="left"), None], + "reassignment_ratio": [Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, + n_clusters=8, + *, + init="k-means++", + max_iter=100, + batch_size=1024, + verbose=0, + compute_labels=True, + random_state=None, + tol=0.0, + max_no_improvement=10, + init_size=None, + n_init="auto", + reassignment_ratio=0.01, + ): + super().__init__( + n_clusters=n_clusters, + init=init, + max_iter=max_iter, + verbose=verbose, + random_state=random_state, + tol=tol, + n_init=n_init, + ) + + self.max_no_improvement = max_no_improvement + self.batch_size = batch_size + self.compute_labels = compute_labels + self.init_size = init_size + self.reassignment_ratio = reassignment_ratio + + def _check_params_vs_input(self, X): + super()._check_params_vs_input(X, default_n_init=3) + + self._batch_size = min(self.batch_size, X.shape[0]) + + # init_size + self._init_size = self.init_size + if self._init_size is None: + self._init_size = 3 * self._batch_size + if self._init_size < self.n_clusters: + self._init_size = 3 * self.n_clusters + elif self._init_size < self.n_clusters: + warnings.warn( + ( + f"init_size={self._init_size} should be larger than " + f"n_clusters={self.n_clusters}. Setting it to " + "min(3*n_clusters, n_samples)" + ), + RuntimeWarning, + stacklevel=2, + ) + self._init_size = 3 * self.n_clusters + self._init_size = min(self._init_size, X.shape[0]) + + # reassignment_ratio + if self.reassignment_ratio < 0: + raise ValueError( + "reassignment_ratio should be >= 0, got " + f"{self.reassignment_ratio} instead." 
+ ) + + def _warn_mkl_vcomp(self, n_active_threads): + """Warn when vcomp and mkl are both present""" + warnings.warn( + "MiniBatchKMeans is known to have a memory leak on " + "Windows with MKL, when there are less chunks than " + "available threads. You can prevent it by setting " + f"batch_size >= {self._n_threads * CHUNK_SIZE} or by " + "setting the environment variable " + f"OMP_NUM_THREADS={n_active_threads}" + ) + + def _mini_batch_convergence( + self, step, n_steps, n_samples, centers_squared_diff, batch_inertia + ): + """Helper function to encapsulate the early stopping logic""" + # Normalize inertia to be able to compare values when + # batch_size changes + batch_inertia /= self._batch_size + + # count steps starting from 1 for user friendly verbose mode. + step = step + 1 + + # Ignore first iteration because it's inertia from initialization. + if step == 1: + if self.verbose: + print( + f"Minibatch step {step}/{n_steps}: mean batch " + f"inertia: {batch_inertia}" + ) + return False + + # Compute an Exponentially Weighted Average of the inertia to + # monitor the convergence while discarding minibatch-local stochastic + # variability: https://en.wikipedia.org/wiki/Moving_average + if self._ewa_inertia is None: + self._ewa_inertia = batch_inertia + else: + alpha = self._batch_size * 2.0 / (n_samples + 1) + alpha = min(alpha, 1) + self._ewa_inertia = self._ewa_inertia * (1 - alpha) + batch_inertia * alpha + + # Log progress to be able to monitor convergence + if self.verbose: + print( + f"Minibatch step {step}/{n_steps}: mean batch inertia: " + f"{batch_inertia}, ewa inertia: {self._ewa_inertia}" + ) + + # Early stopping based on absolute tolerance on squared change of + # centers position + if self._tol > 0.0 and centers_squared_diff <= self._tol: + if self.verbose: + print(f"Converged (small centers change) at step {step}/{n_steps}") + return True + + # Early stopping heuristic due to lack of improvement on smoothed + # inertia + if self._ewa_inertia_min is None or self._ewa_inertia < self._ewa_inertia_min: + self._no_improvement = 0 + self._ewa_inertia_min = self._ewa_inertia + else: + self._no_improvement += 1 + + if ( + self.max_no_improvement is not None + and self._no_improvement >= self.max_no_improvement + ): + if self.verbose: + print( + "Converged (lack of improvement in inertia) at step " + f"{step}/{n_steps}" + ) + return True + + return False + + def _random_reassign(self): + """Check if a random reassignment needs to be done. + + Do random reassignments each time 10 * n_clusters samples have been + processed. + + If there are empty clusters we always want to reassign. + """ + self._n_since_last_reassign += self._batch_size + if (self._counts == 0).any() or self._n_since_last_reassign >= ( + 10 * self.n_clusters + ): + self._n_since_last_reassign = 0 + return True + return False + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None, sample_weight=None): + """Compute the centroids on X by chunking it into mini-batches. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training instances to cluster. It must be noted that the data + will be converted to C ordering, which will cause a memory copy + if the given data is not C-contiguous. + If a sparse matrix is passed, a copy will be made if it's not in + CSR format. + + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in X. 
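A sketch of the smoothed-inertia rule from `_mini_batch_convergence` above: the EWA coefficient is derived from the batch and dataset sizes, and the running average is what the `max_no_improvement` counter watches (the batch inertia values below are invented):

def update_ewa_inertia(ewa, batch_inertia, batch_size, n_samples):
    if ewa is None:                                   # first measured step
        return batch_inertia
    alpha = min(batch_size * 2.0 / (n_samples + 1), 1.0)
    return ewa * (1 - alpha) + batch_inertia * alpha

ewa = None
for batch_inertia in [12.0, 9.5, 9.7, 9.4]:
    ewa = update_ewa_inertia(ewa, batch_inertia, batch_size=1024, n_samples=100_000)
    print(round(ewa, 4))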
If None, all observations + are assigned equal weight. `sample_weight` is not used during + initialization if `init` is a callable or a user provided array. + + .. versionadded:: 0.20 + + Returns + ------- + self : object + Fitted estimator. + """ + X = self._validate_data( + X, + accept_sparse="csr", + dtype=[np.float64, np.float32], + order="C", + accept_large_sparse=False, + ) + + self._check_params_vs_input(X) + random_state = check_random_state(self.random_state) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + self._n_threads = _openmp_effective_n_threads() + n_samples, n_features = X.shape + + # Validate init array + init = self.init + if _is_arraylike_not_scalar(init): + init = check_array(init, dtype=X.dtype, copy=True, order="C") + self._validate_center_shape(X, init) + + self._check_mkl_vcomp(X, self._batch_size) + + # precompute squared norms of data points + x_squared_norms = row_norms(X, squared=True) + + # Validation set for the init + validation_indices = random_state.randint(0, n_samples, self._init_size) + X_valid = X[validation_indices] + sample_weight_valid = sample_weight[validation_indices] + + # perform several inits with random subsets + best_inertia = None + for init_idx in range(self._n_init): + if self.verbose: + print(f"Init {init_idx + 1}/{self._n_init} with method {init}") + + # Initialize the centers using only a fraction of the data as we + # expect n_samples to be very large when using MiniBatchKMeans. + cluster_centers = self._init_centroids( + X, + x_squared_norms=x_squared_norms, + init=init, + random_state=random_state, + init_size=self._init_size, + sample_weight=sample_weight, + ) + + # Compute inertia on a validation set. + _, inertia = _labels_inertia_threadpool_limit( + X_valid, + sample_weight_valid, + cluster_centers, + n_threads=self._n_threads, + ) + + if self.verbose: + print(f"Inertia for init {init_idx + 1}/{self._n_init}: {inertia}") + if best_inertia is None or inertia < best_inertia: + init_centers = cluster_centers + best_inertia = inertia + + centers = init_centers + centers_new = np.empty_like(centers) + + # Initialize counts + self._counts = np.zeros(self.n_clusters, dtype=X.dtype) + + # Attributes to monitor the convergence + self._ewa_inertia = None + self._ewa_inertia_min = None + self._no_improvement = 0 + + # Initialize number of samples seen since last reassignment + self._n_since_last_reassign = 0 + + n_steps = (self.max_iter * n_samples) // self._batch_size + + with threadpool_limits(limits=1, user_api="blas"): + # Perform the iterative optimization until convergence + for i in range(n_steps): + # Sample a minibatch from the full dataset + minibatch_indices = random_state.randint(0, n_samples, self._batch_size) + + # Perform the actual update step on the minibatch data + batch_inertia = _mini_batch_step( + X=X[minibatch_indices], + sample_weight=sample_weight[minibatch_indices], + centers=centers, + centers_new=centers_new, + weight_sums=self._counts, + random_state=random_state, + random_reassign=self._random_reassign(), + reassignment_ratio=self.reassignment_ratio, + verbose=self.verbose, + n_threads=self._n_threads, + ) + + if self._tol > 0.0: + centers_squared_diff = np.sum((centers_new - centers) ** 2) + else: + centers_squared_diff = 0 + + centers, centers_new = centers_new, centers + + # Monitor convergence and do early stopping if necessary + if self._mini_batch_convergence( + i, n_steps, n_samples, centers_squared_diff, batch_inertia + ): + break + + self.cluster_centers_ = centers + 
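For context on the loop sizing in `fit` above, a small arithmetic sketch of how many mini-batch steps are scheduled and how `n_iter_` is derived when early stopping fires at step `i` (the sample, batch and iteration counts are illustrative):

import math

n_samples, batch_size, max_iter = 10_000, 1_024, 100
n_steps = (max_iter * n_samples) // batch_size        # 976 scheduled mini-batch steps
i = 249                                               # hypothetical stopping step (0-based)
n_iter = math.ceil((i + 1) * batch_size / n_samples)  # 26 passes over the full dataset
print(n_steps, n_iter)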
self._n_features_out = self.cluster_centers_.shape[0] + + self.n_steps_ = i + 1 + self.n_iter_ = int(np.ceil(((i + 1) * self._batch_size) / n_samples)) + + if self.compute_labels: + self.labels_, self.inertia_ = _labels_inertia_threadpool_limit( + X, + sample_weight, + self.cluster_centers_, + n_threads=self._n_threads, + ) + else: + self.inertia_ = self._ewa_inertia * n_samples + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None, sample_weight=None): + """Update k means estimate on a single mini-batch X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training instances to cluster. It must be noted that the data + will be converted to C ordering, which will cause a memory copy + if the given data is not C-contiguous. + If a sparse matrix is passed, a copy will be made if it's not in + CSR format. + + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in X. If None, all observations + are assigned equal weight. `sample_weight` is not used during + initialization if `init` is a callable or a user provided array. + + Returns + ------- + self : object + Return updated estimator. + """ + has_centers = hasattr(self, "cluster_centers_") + + X = self._validate_data( + X, + accept_sparse="csr", + dtype=[np.float64, np.float32], + order="C", + accept_large_sparse=False, + reset=not has_centers, + ) + + self._random_state = getattr( + self, "_random_state", check_random_state(self.random_state) + ) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + self.n_steps_ = getattr(self, "n_steps_", 0) + + # precompute squared norms of data points + x_squared_norms = row_norms(X, squared=True) + + if not has_centers: + # this instance has not been fitted yet (fit or partial_fit) + self._check_params_vs_input(X) + self._n_threads = _openmp_effective_n_threads() + + # Validate init array + init = self.init + if _is_arraylike_not_scalar(init): + init = check_array(init, dtype=X.dtype, copy=True, order="C") + self._validate_center_shape(X, init) + + self._check_mkl_vcomp(X, X.shape[0]) + + # initialize the cluster centers + self.cluster_centers_ = self._init_centroids( + X, + x_squared_norms=x_squared_norms, + init=init, + random_state=self._random_state, + init_size=self._init_size, + sample_weight=sample_weight, + ) + + # Initialize counts + self._counts = np.zeros(self.n_clusters, dtype=X.dtype) + + # Initialize number of samples seen since last reassignment + self._n_since_last_reassign = 0 + + with threadpool_limits(limits=1, user_api="blas"): + _mini_batch_step( + X, + sample_weight=sample_weight, + centers=self.cluster_centers_, + centers_new=self.cluster_centers_, + weight_sums=self._counts, + random_state=self._random_state, + random_reassign=self._random_reassign(), + reassignment_ratio=self.reassignment_ratio, + verbose=self.verbose, + n_threads=self._n_threads, + ) + + if self.compute_labels: + self.labels_, self.inertia_ = _labels_inertia_threadpool_limit( + X, + sample_weight, + self.cluster_centers_, + n_threads=self._n_threads, + ) + + self.n_steps_ += 1 + self._n_features_out = self.cluster_centers_.shape[0] + + return self diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f9256b5dabde0153cf5d81fab3e5da89c68153a Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34dcec375e5853d8d2cc1f794933f33f64dfb4f9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_affinity_propagation.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_affinity_propagation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..875c1edc63978dd78f0f12dd7040391a7a893918 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_affinity_propagation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfaf9ac5c4608ad8155b008ee8c9b6d428c5009c Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_birch.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_birch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7239e33d3309e0e92799c80d511c8356ca910db2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_birch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bisect_k_means.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bisect_k_means.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39e1c61896b13315c0d959aee513dfee22dbc7da Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_bisect_k_means.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_dbscan.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_dbscan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2181950392b6e335bdc51598c15b88a6b264039 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_dbscan.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_feature_agglomeration.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_feature_agglomeration.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2d5355564cd081cd4f644ce27dfc8a7ec22a8bcb Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_feature_agglomeration.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hdbscan.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hdbscan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d95474e34a064393aee4150adf4c24a43649cf6d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hdbscan.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hierarchical.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hierarchical.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04a3e028268e4bd2080b2197954241cd8b5bda44 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_hierarchical.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_k_means.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_k_means.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01261c022b954ab606c2e88dc0bd4415b2902808 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_k_means.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_mean_shift.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_mean_shift.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a70e4f68288044b1d5a382ca6d1a9a550d0557a Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_mean_shift.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_optics.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_optics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..580ebf68e6db83f1446a310cbec424739f60401e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_optics.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_spectral.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_spectral.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9227f38fb4d49e7108ec4528635fa2ba4f8a5f78 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/__pycache__/test_spectral.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/common.py b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/common.py new file mode 100644 index 0000000000000000000000000000000000000000..b1fe047fe230af1c3fbb2ec0b72f3ef20e5aa3aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/common.py @@ -0,0 +1,37 @@ +""" +Common utilities for testing clustering. 
+ +""" + +import numpy as np + +############################################################################### +# Generate sample data + + +def generate_clustered_data( + seed=0, n_clusters=3, n_features=2, n_samples_per_cluster=20, std=0.4 +): + prng = np.random.RandomState(seed) + + # the data is voluntary shifted away from zero to check clustering + # algorithm robustness with regards to non centered data + means = ( + np.array( + [ + [1, 1, 1, 0], + [-1, -1, 0, 1], + [1, -1, 1, 1], + [-1, 1, 1, 0], + ] + ) + + 10 + ) + + X = np.empty((0, n_features)) + for i in range(n_clusters): + X = np.r_[ + X, + means[i][:n_features] + std * prng.randn(n_samples_per_cluster, n_features), + ] + return X diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_affinity_propagation.py b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_affinity_propagation.py new file mode 100644 index 0000000000000000000000000000000000000000..c3138e59111ed849988dd0e6d3433a4bb251e2a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_affinity_propagation.py @@ -0,0 +1,321 @@ +""" +Testing for Clustering methods + +""" + +import warnings + +import numpy as np +import pytest + +from sklearn.cluster import AffinityPropagation, affinity_propagation +from sklearn.cluster._affinity_propagation import _equal_similarities_and_preferences +from sklearn.datasets import make_blobs +from sklearn.exceptions import ConvergenceWarning, NotFittedError +from sklearn.metrics import euclidean_distances +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.fixes import CSR_CONTAINERS + +n_clusters = 3 +centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10 +X, _ = make_blobs( + n_samples=60, + n_features=2, + centers=centers, + cluster_std=0.4, + shuffle=True, + random_state=0, +) + +# TODO: AffinityPropagation must preserve dtype for its fitted attributes +# and test must be created accordingly to this new behavior. +# For more details, see: https://github.com/scikit-learn/scikit-learn/issues/11000 + + +def test_affinity_propagation(global_random_seed, global_dtype): + """Test consistency of the affinity propagations.""" + S = -euclidean_distances(X.astype(global_dtype, copy=False), squared=True) + preference = np.median(S) * 10 + cluster_centers_indices, labels = affinity_propagation( + S, preference=preference, random_state=global_random_seed + ) + + n_clusters_ = len(cluster_centers_indices) + + assert n_clusters == n_clusters_ + + +def test_affinity_propagation_precomputed(): + """Check equality of precomputed affinity matrix to internally computed affinity + matrix. 
+ """ + S = -euclidean_distances(X, squared=True) + preference = np.median(S) * 10 + af = AffinityPropagation( + preference=preference, affinity="precomputed", random_state=28 + ) + labels_precomputed = af.fit(S).labels_ + + af = AffinityPropagation(preference=preference, verbose=True, random_state=37) + labels = af.fit(X).labels_ + + assert_array_equal(labels, labels_precomputed) + + cluster_centers_indices = af.cluster_centers_indices_ + + n_clusters_ = len(cluster_centers_indices) + assert np.unique(labels).size == n_clusters_ + assert n_clusters == n_clusters_ + + +def test_affinity_propagation_no_copy(): + """Check behaviour of not copying the input data.""" + S = -euclidean_distances(X, squared=True) + S_original = S.copy() + preference = np.median(S) * 10 + assert not np.allclose(S.diagonal(), preference) + + # with copy=True S should not be modified + affinity_propagation(S, preference=preference, copy=True, random_state=0) + assert_allclose(S, S_original) + assert not np.allclose(S.diagonal(), preference) + assert_allclose(S.diagonal(), np.zeros(S.shape[0])) + + # with copy=False S will be modified inplace + affinity_propagation(S, preference=preference, copy=False, random_state=0) + assert_allclose(S.diagonal(), preference) + + # test that copy=True and copy=False lead to the same result + S = S_original.copy() + af = AffinityPropagation(preference=preference, verbose=True, random_state=0) + + labels = af.fit(X).labels_ + _, labels_no_copy = affinity_propagation( + S, preference=preference, copy=False, random_state=74 + ) + assert_array_equal(labels, labels_no_copy) + + +def test_affinity_propagation_affinity_shape(): + """Check the shape of the affinity matrix when using `affinity_propagation.""" + S = -euclidean_distances(X, squared=True) + err_msg = "The matrix of similarities must be a square array" + with pytest.raises(ValueError, match=err_msg): + affinity_propagation(S[:, :-1]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_affinity_propagation_precomputed_with_sparse_input(csr_container): + err_msg = "Sparse data was passed for X, but dense data is required" + with pytest.raises(TypeError, match=err_msg): + AffinityPropagation(affinity="precomputed").fit(csr_container((3, 3))) + + +def test_affinity_propagation_predict(global_random_seed, global_dtype): + # Test AffinityPropagation.predict + af = AffinityPropagation(affinity="euclidean", random_state=global_random_seed) + X_ = X.astype(global_dtype, copy=False) + labels = af.fit_predict(X_) + labels2 = af.predict(X_) + assert_array_equal(labels, labels2) + + +def test_affinity_propagation_predict_error(): + # Test exception in AffinityPropagation.predict + # Not fitted. + af = AffinityPropagation(affinity="euclidean") + with pytest.raises(NotFittedError): + af.predict(X) + + # Predict not supported when affinity="precomputed". 
+ S = np.dot(X, X.T) + af = AffinityPropagation(affinity="precomputed", random_state=57) + af.fit(S) + with pytest.raises(ValueError, match="expecting 60 features as input"): + af.predict(X) + + +def test_affinity_propagation_fit_non_convergence(global_dtype): + # In case of non-convergence of affinity_propagation(), the cluster + # centers should be an empty array and training samples should be labelled + # as noise (-1) + X = np.array([[0, 0], [1, 1], [-2, -2]], dtype=global_dtype) + + # Force non-convergence by allowing only a single iteration + af = AffinityPropagation(preference=-10, max_iter=1, random_state=82) + + with pytest.warns(ConvergenceWarning): + af.fit(X) + assert_allclose(np.empty((0, 2)), af.cluster_centers_) + assert_array_equal(np.array([-1, -1, -1]), af.labels_) + + +def test_affinity_propagation_equal_mutual_similarities(global_dtype): + X = np.array([[-1, 1], [1, -1]], dtype=global_dtype) + S = -euclidean_distances(X, squared=True) + + # setting preference > similarity + with pytest.warns(UserWarning, match="mutually equal"): + cluster_center_indices, labels = affinity_propagation(S, preference=0) + + # expect every sample to become an exemplar + assert_array_equal([0, 1], cluster_center_indices) + assert_array_equal([0, 1], labels) + + # setting preference < similarity + with pytest.warns(UserWarning, match="mutually equal"): + cluster_center_indices, labels = affinity_propagation(S, preference=-10) + + # expect one cluster, with arbitrary (first) sample as exemplar + assert_array_equal([0], cluster_center_indices) + assert_array_equal([0, 0], labels) + + # setting different preferences + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + cluster_center_indices, labels = affinity_propagation( + S, preference=[-20, -10], random_state=37 + ) + + # expect one cluster, with highest-preference sample as exemplar + assert_array_equal([1], cluster_center_indices) + assert_array_equal([0, 0], labels) + + +def test_affinity_propagation_predict_non_convergence(global_dtype): + # In case of non-convergence of affinity_propagation(), the cluster + # centers should be an empty array + X = np.array([[0, 0], [1, 1], [-2, -2]], dtype=global_dtype) + + # Force non-convergence by allowing only a single iteration + with pytest.warns(ConvergenceWarning): + af = AffinityPropagation(preference=-10, max_iter=1, random_state=75).fit(X) + + # At prediction time, consider new samples as noise since there are no + # clusters + to_predict = np.array([[2, 2], [3, 3], [4, 4]]) + with pytest.warns(ConvergenceWarning): + y = af.predict(to_predict) + assert_array_equal(np.array([-1, -1, -1]), y) + + +def test_affinity_propagation_non_convergence_regressiontest(global_dtype): + X = np.array( + [[1, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 1]], dtype=global_dtype + ) + af = AffinityPropagation(affinity="euclidean", max_iter=2, random_state=34) + msg = ( + "Affinity propagation did not converge, this model may return degenerate" + " cluster centers and labels." 
+ ) + with pytest.warns(ConvergenceWarning, match=msg): + af.fit(X) + + assert_array_equal(np.array([0, 0, 0]), af.labels_) + + +def test_equal_similarities_and_preferences(global_dtype): + # Unequal distances + X = np.array([[0, 0], [1, 1], [-2, -2]], dtype=global_dtype) + S = -euclidean_distances(X, squared=True) + + assert not _equal_similarities_and_preferences(S, np.array(0)) + assert not _equal_similarities_and_preferences(S, np.array([0, 0])) + assert not _equal_similarities_and_preferences(S, np.array([0, 1])) + + # Equal distances + X = np.array([[0, 0], [1, 1]], dtype=global_dtype) + S = -euclidean_distances(X, squared=True) + + # Different preferences + assert not _equal_similarities_and_preferences(S, np.array([0, 1])) + + # Same preferences + assert _equal_similarities_and_preferences(S, np.array([0, 0])) + assert _equal_similarities_and_preferences(S, np.array(0)) + + +def test_affinity_propagation_random_state(): + """Check that different random states lead to different initialisations + by looking at the center locations after two iterations. + """ + centers = [[1, 1], [-1, -1], [1, -1]] + X, labels_true = make_blobs( + n_samples=300, centers=centers, cluster_std=0.5, random_state=0 + ) + # random_state = 0 + ap = AffinityPropagation(convergence_iter=1, max_iter=2, random_state=0) + ap.fit(X) + centers0 = ap.cluster_centers_ + + # random_state = 76 + ap = AffinityPropagation(convergence_iter=1, max_iter=2, random_state=76) + ap.fit(X) + centers76 = ap.cluster_centers_ + # check that the centers have not yet converged to the same solution + assert np.mean((centers0 - centers76) ** 2) > 1 + + +@pytest.mark.parametrize("container", CSR_CONTAINERS + [np.array]) +def test_affinity_propagation_convergence_warning_dense_sparse(container, global_dtype): + """ + Check that having sparse or dense `centers` format should not + influence the convergence. + Non-regression test for gh-13334. 
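As the tests above suggest, the `preference` value steers how many samples become exemplars. A brief sketch varying it on toy data (exact cluster counts depend on the data, so nothing is asserted here):

from sklearn.cluster import AffinityPropagation
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=100, centers=3, cluster_std=0.5, random_state=0)
for preference in (-1000, -50, None):   # None lets the estimator use the median similarity
    ap = AffinityPropagation(preference=preference, random_state=0, max_iter=500).fit(X)
    print(preference, len(ap.cluster_centers_indices_))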
+ """ + centers = container(np.zeros((1, 10))) + rng = np.random.RandomState(42) + X = rng.rand(40, 10).astype(global_dtype, copy=False) + y = (4 * rng.rand(40)).astype(int) + ap = AffinityPropagation(random_state=46) + ap.fit(X, y) + ap.cluster_centers_ = centers + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + assert_array_equal(ap.predict(X), np.zeros(X.shape[0], dtype=int)) + + +# FIXME; this test is broken with different random states, needs to be revisited +def test_correct_clusters(global_dtype): + # Test to fix incorrect clusters due to dtype change + # (non-regression test for issue #10832) + X = np.array( + [[1, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 1]], dtype=global_dtype + ) + afp = AffinityPropagation(preference=1, affinity="precomputed", random_state=0).fit( + X + ) + expected = np.array([0, 1, 1, 2]) + assert_array_equal(afp.labels_, expected) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_input_for_predict(csr_container): + # Test to make sure sparse inputs are accepted for predict + # (non-regression test for issue #20049) + af = AffinityPropagation(affinity="euclidean", random_state=42) + af.fit(X) + labels = af.predict(csr_container((2, 2))) + assert_array_equal(labels, (2, 2)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_input_for_fit_predict(csr_container): + # Test to make sure sparse inputs are accepted for fit_predict + # (non-regression test for issue #20049) + af = AffinityPropagation(affinity="euclidean", random_state=42) + rng = np.random.RandomState(42) + X = csr_container(rng.randint(0, 2, size=(5, 5))) + labels = af.fit_predict(X) + assert_array_equal(labels, (0, 1, 1, 2, 3)) + + +def test_affinity_propagation_equal_points(): + """Make sure we do not assign multiple clusters to equal points. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/pull/20043 + """ + X = np.zeros((8, 1)) + af = AffinityPropagation(affinity="euclidean", damping=0.5, random_state=42).fit(X) + assert np.all(af.labels_ == 0) diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_bicluster.py b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_bicluster.py new file mode 100644 index 0000000000000000000000000000000000000000..ebc845a7bf262c60cf9f039e5ce021d841bdf4d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_bicluster.py @@ -0,0 +1,264 @@ +"""Testing for Spectral Biclustering methods""" + +import numpy as np +import pytest +from scipy.sparse import issparse + +from sklearn.base import BaseEstimator, BiclusterMixin +from sklearn.cluster import SpectralBiclustering, SpectralCoclustering +from sklearn.cluster._bicluster import ( + _bistochastic_normalize, + _log_normalize, + _scale_normalize, +) +from sklearn.datasets import make_biclusters, make_checkerboard +from sklearn.metrics import consensus_score, v_measure_score +from sklearn.model_selection import ParameterGrid +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +class MockBiclustering(BiclusterMixin, BaseEstimator): + # Mock object for testing get_submatrix. + def __init__(self): + pass + + def get_indices(self, i): + # Overridden to reproduce old get_submatrix test. 
+ return ( + np.where([True, True, False, False, True])[0], + np.where([False, False, True, True])[0], + ) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_get_submatrix(csr_container): + data = np.arange(20).reshape(5, 4) + model = MockBiclustering() + + for X in (data, csr_container(data), data.tolist()): + submatrix = model.get_submatrix(0, X) + if issparse(submatrix): + submatrix = submatrix.toarray() + assert_array_equal(submatrix, [[2, 3], [6, 7], [18, 19]]) + submatrix[:] = -1 + if issparse(X): + X = X.toarray() + assert np.all(X != -1) + + +def _test_shape_indices(model): + # Test get_shape and get_indices on fitted model. + for i in range(model.n_clusters): + m, n = model.get_shape(i) + i_ind, j_ind = model.get_indices(i) + assert len(i_ind) == m + assert len(j_ind) == n + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_spectral_coclustering(global_random_seed, csr_container): + # Test Dhillon's Spectral CoClustering on a simple problem. + param_grid = { + "svd_method": ["randomized", "arpack"], + "n_svd_vecs": [None, 20], + "mini_batch": [False, True], + "init": ["k-means++"], + "n_init": [10], + } + S, rows, cols = make_biclusters( + (30, 30), 3, noise=0.1, random_state=global_random_seed + ) + S -= S.min() # needs to be nonnegative before making it sparse + S = np.where(S < 1, 0, S) # threshold some values + for mat in (S, csr_container(S)): + for kwargs in ParameterGrid(param_grid): + model = SpectralCoclustering( + n_clusters=3, random_state=global_random_seed, **kwargs + ) + model.fit(mat) + + assert model.rows_.shape == (3, 30) + assert_array_equal(model.rows_.sum(axis=0), np.ones(30)) + assert_array_equal(model.columns_.sum(axis=0), np.ones(30)) + assert consensus_score(model.biclusters_, (rows, cols)) == 1 + + _test_shape_indices(model) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_spectral_biclustering(global_random_seed, csr_container): + # Test Kluger methods on a checkerboard dataset. 
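+ # A (30, 30) checkerboard with 3 row and 3 column clusters yields 9 biclusters.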
+ S, rows, cols = make_checkerboard( + (30, 30), 3, noise=0.5, random_state=global_random_seed + ) + + non_default_params = { + "method": ["scale", "log"], + "svd_method": ["arpack"], + "n_svd_vecs": [20], + "mini_batch": [True], + } + + for mat in (S, csr_container(S)): + for param_name, param_values in non_default_params.items(): + for param_value in param_values: + model = SpectralBiclustering( + n_clusters=3, + n_init=3, + init="k-means++", + random_state=global_random_seed, + ) + model.set_params(**dict([(param_name, param_value)])) + + if issparse(mat) and model.get_params().get("method") == "log": + # cannot take log of sparse matrix + with pytest.raises(ValueError): + model.fit(mat) + continue + else: + model.fit(mat) + + assert model.rows_.shape == (9, 30) + assert model.columns_.shape == (9, 30) + assert_array_equal(model.rows_.sum(axis=0), np.repeat(3, 30)) + assert_array_equal(model.columns_.sum(axis=0), np.repeat(3, 30)) + assert consensus_score(model.biclusters_, (rows, cols)) == 1 + + _test_shape_indices(model) + + +def _do_scale_test(scaled): + """Check that rows sum to one constant, and columns to another.""" + row_sum = scaled.sum(axis=1) + col_sum = scaled.sum(axis=0) + if issparse(scaled): + row_sum = np.asarray(row_sum).squeeze() + col_sum = np.asarray(col_sum).squeeze() + assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100), decimal=1) + assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100), decimal=1) + + +def _do_bistochastic_test(scaled): + """Check that rows and columns sum to the same constant.""" + _do_scale_test(scaled) + assert_almost_equal(scaled.sum(axis=0).mean(), scaled.sum(axis=1).mean(), decimal=1) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_scale_normalize(global_random_seed, csr_container): + generator = np.random.RandomState(global_random_seed) + X = generator.rand(100, 100) + for mat in (X, csr_container(X)): + scaled, _, _ = _scale_normalize(mat) + _do_scale_test(scaled) + if issparse(mat): + assert issparse(scaled) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_bistochastic_normalize(global_random_seed, csr_container): + generator = np.random.RandomState(global_random_seed) + X = generator.rand(100, 100) + for mat in (X, csr_container(X)): + scaled = _bistochastic_normalize(mat) + _do_bistochastic_test(scaled) + if issparse(mat): + assert issparse(scaled) + + +def test_log_normalize(global_random_seed): + # adding any constant to a log-scaled matrix should make it + # bistochastic + generator = np.random.RandomState(global_random_seed) + mat = generator.rand(100, 100) + scaled = _log_normalize(mat) + 1 + _do_bistochastic_test(scaled) + + +def test_fit_best_piecewise(global_random_seed): + model = SpectralBiclustering(random_state=global_random_seed) + vectors = np.array([[0, 0, 0, 1, 1, 1], [2, 2, 2, 3, 3, 3], [0, 1, 2, 3, 4, 5]]) + best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2) + assert_array_equal(best, vectors[:2]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_project_and_cluster(global_random_seed, csr_container): + model = SpectralBiclustering(random_state=global_random_seed) + data = np.array([[1, 1, 1], [1, 1, 1], [3, 6, 3], [3, 6, 3]]) + vectors = np.array([[1, 0], [0, 1], [0, 0]]) + for mat in (data, csr_container(data)): + labels = model._project_and_cluster(mat, vectors, n_clusters=2) + assert_almost_equal(v_measure_score(labels, [0, 0, 1, 1]), 1.0) + + +def test_perfect_checkerboard(global_random_seed): + # XXX 
Previously failed on build bot (not reproducible) + model = SpectralBiclustering( + 3, svd_method="arpack", random_state=global_random_seed + ) + + S, rows, cols = make_checkerboard( + (30, 30), 3, noise=0, random_state=global_random_seed + ) + model.fit(S) + assert consensus_score(model.biclusters_, (rows, cols)) == 1 + + S, rows, cols = make_checkerboard( + (40, 30), 3, noise=0, random_state=global_random_seed + ) + model.fit(S) + assert consensus_score(model.biclusters_, (rows, cols)) == 1 + + S, rows, cols = make_checkerboard( + (30, 40), 3, noise=0, random_state=global_random_seed + ) + model.fit(S) + assert consensus_score(model.biclusters_, (rows, cols)) == 1 + + +@pytest.mark.parametrize( + "params, type_err, err_msg", + [ + ( + {"n_clusters": 6}, + ValueError, + "n_clusters should be <= n_samples=5", + ), + ( + {"n_clusters": (3, 3, 3)}, + ValueError, + "Incorrect parameter n_clusters", + ), + ( + {"n_clusters": (3, 6)}, + ValueError, + "Incorrect parameter n_clusters", + ), + ( + {"n_components": 3, "n_best": 4}, + ValueError, + "n_best=4 must be <= n_components=3", + ), + ], +) +def test_spectralbiclustering_parameter_validation(params, type_err, err_msg): + """Check parameters validation in `SpectralBiClustering`""" + data = np.arange(25).reshape((5, 5)) + model = SpectralBiclustering(**params) + with pytest.raises(type_err, match=err_msg): + model.fit(data) + + +@pytest.mark.parametrize("est", (SpectralBiclustering(), SpectralCoclustering())) +def test_n_features_in_(est): + X, _, _ = make_biclusters((3, 3), 3, random_state=0) + + assert not hasattr(est, "n_features_in_") + est.fit(X) + assert est.n_features_in_ == 3 diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_birch.py b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_birch.py new file mode 100644 index 0000000000000000000000000000000000000000..fc1c702d1f462b877ea70dcaa43667bdf446b589 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_birch.py @@ -0,0 +1,242 @@ +""" +Tests for the birch clustering algorithm. 
+""" + +import numpy as np +import pytest + +from sklearn.cluster import AgglomerativeClustering, Birch +from sklearn.cluster.tests.common import generate_clustered_data +from sklearn.datasets import make_blobs +from sklearn.exceptions import ConvergenceWarning +from sklearn.metrics import pairwise_distances_argmin, v_measure_score +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.fixes import CSR_CONTAINERS + + +def test_n_samples_leaves_roots(global_random_seed, global_dtype): + # Sanity check for the number of samples in leaves and roots + X, y = make_blobs(n_samples=10, random_state=global_random_seed) + X = X.astype(global_dtype, copy=False) + brc = Birch() + brc.fit(X) + n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_]) + n_samples_leaves = sum( + [sc.n_samples_ for leaf in brc._get_leaves() for sc in leaf.subclusters_] + ) + assert n_samples_leaves == X.shape[0] + assert n_samples_root == X.shape[0] + + +def test_partial_fit(global_random_seed, global_dtype): + # Test that fit is equivalent to calling partial_fit multiple times + X, y = make_blobs(n_samples=100, random_state=global_random_seed) + X = X.astype(global_dtype, copy=False) + brc = Birch(n_clusters=3) + brc.fit(X) + brc_partial = Birch(n_clusters=None) + brc_partial.partial_fit(X[:50]) + brc_partial.partial_fit(X[50:]) + assert_allclose(brc_partial.subcluster_centers_, brc.subcluster_centers_) + + # Test that same global labels are obtained after calling partial_fit + # with None + brc_partial.set_params(n_clusters=3) + brc_partial.partial_fit(None) + assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_) + + +def test_birch_predict(global_random_seed, global_dtype): + # Test the predict method predicts the nearest centroid. + rng = np.random.RandomState(global_random_seed) + X = generate_clustered_data(n_clusters=3, n_features=3, n_samples_per_cluster=10) + X = X.astype(global_dtype, copy=False) + + # n_samples * n_samples_per_cluster + shuffle_indices = np.arange(30) + rng.shuffle(shuffle_indices) + X_shuffle = X[shuffle_indices, :] + brc = Birch(n_clusters=4, threshold=1.0) + brc.fit(X_shuffle) + + # Birch must preserve inputs' dtype + assert brc.subcluster_centers_.dtype == global_dtype + + assert_array_equal(brc.labels_, brc.predict(X_shuffle)) + centroids = brc.subcluster_centers_ + nearest_centroid = brc.subcluster_labels_[ + pairwise_distances_argmin(X_shuffle, centroids) + ] + assert_allclose(v_measure_score(nearest_centroid, brc.labels_), 1.0) + + +def test_n_clusters(global_random_seed, global_dtype): + # Test that n_clusters param works properly + X, y = make_blobs(n_samples=100, centers=10, random_state=global_random_seed) + X = X.astype(global_dtype, copy=False) + brc1 = Birch(n_clusters=10) + brc1.fit(X) + assert len(brc1.subcluster_centers_) > 10 + assert len(np.unique(brc1.labels_)) == 10 + + # Test that n_clusters = Agglomerative Clustering gives + # the same results. + gc = AgglomerativeClustering(n_clusters=10) + brc2 = Birch(n_clusters=gc) + brc2.fit(X) + assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_) + assert_array_equal(brc1.labels_, brc2.labels_) + + # Test that a small number of clusters raises a warning. 
+ brc4 = Birch(threshold=10000.0) + with pytest.warns(ConvergenceWarning): + brc4.fit(X) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_X(global_random_seed, global_dtype, csr_container): + # Test that sparse and dense data give same results + X, y = make_blobs(n_samples=100, centers=10, random_state=global_random_seed) + X = X.astype(global_dtype, copy=False) + brc = Birch(n_clusters=10) + brc.fit(X) + + csr = csr_container(X) + brc_sparse = Birch(n_clusters=10) + brc_sparse.fit(csr) + + # Birch must preserve inputs' dtype + assert brc_sparse.subcluster_centers_.dtype == global_dtype + + assert_array_equal(brc.labels_, brc_sparse.labels_) + assert_allclose(brc.subcluster_centers_, brc_sparse.subcluster_centers_) + + +def test_partial_fit_second_call_error_checks(): + # second partial fit calls will error when n_features is not consistent + # with the first call + X, y = make_blobs(n_samples=100) + brc = Birch(n_clusters=3) + brc.partial_fit(X, y) + + msg = "X has 1 features, but Birch is expecting 2 features" + with pytest.raises(ValueError, match=msg): + brc.partial_fit(X[:, [0]], y) + + +def check_branching_factor(node, branching_factor): + subclusters = node.subclusters_ + assert branching_factor >= len(subclusters) + for cluster in subclusters: + if cluster.child_: + check_branching_factor(cluster.child_, branching_factor) + + +def test_branching_factor(global_random_seed, global_dtype): + # Test that nodes have at max branching_factor number of subclusters + X, y = make_blobs(random_state=global_random_seed) + X = X.astype(global_dtype, copy=False) + branching_factor = 9 + + # Purposefully set a low threshold to maximize the subclusters. + brc = Birch(n_clusters=None, branching_factor=branching_factor, threshold=0.01) + brc.fit(X) + check_branching_factor(brc.root_, branching_factor) + brc = Birch(n_clusters=3, branching_factor=branching_factor, threshold=0.01) + brc.fit(X) + check_branching_factor(brc.root_, branching_factor) + + +def check_threshold(birch_instance, threshold): + """Use the leaf linked list for traversal""" + current_leaf = birch_instance.dummy_leaf_.next_leaf_ + while current_leaf: + subclusters = current_leaf.subclusters_ + for sc in subclusters: + assert threshold >= sc.radius + current_leaf = current_leaf.next_leaf_ + + +def test_threshold(global_random_seed, global_dtype): + # Test that the leaf subclusters have a threshold lesser than radius + X, y = make_blobs(n_samples=80, centers=4, random_state=global_random_seed) + X = X.astype(global_dtype, copy=False) + brc = Birch(threshold=0.5, n_clusters=None) + brc.fit(X) + check_threshold(brc, 0.5) + + brc = Birch(threshold=5.0, n_clusters=None) + brc.fit(X) + check_threshold(brc, 5.0) + + +def test_birch_n_clusters_long_int(): + # Check that birch supports n_clusters with np.int64 dtype, for instance + # coming from np.arange. 
#16484 + X, _ = make_blobs(random_state=0) + n_clusters = np.int64(5) + Birch(n_clusters=n_clusters).fit(X) + + +def test_feature_names_out(): + """Check `get_feature_names_out` for `Birch`.""" + X, _ = make_blobs(n_samples=80, n_features=4, random_state=0) + brc = Birch(n_clusters=4) + brc.fit(X) + n_clusters = brc.subcluster_centers_.shape[0] + + names_out = brc.get_feature_names_out() + assert_array_equal([f"birch{i}" for i in range(n_clusters)], names_out) + + +def test_transform_match_across_dtypes(global_random_seed): + X, _ = make_blobs(n_samples=80, n_features=4, random_state=global_random_seed) + brc = Birch(n_clusters=4, threshold=1.1) + Y_64 = brc.fit_transform(X) + Y_32 = brc.fit_transform(X.astype(np.float32)) + + assert_allclose(Y_64, Y_32, atol=1e-6) + + +def test_subcluster_dtype(global_dtype): + X = make_blobs(n_samples=80, n_features=4, random_state=0)[0].astype( + global_dtype, copy=False + ) + brc = Birch(n_clusters=4) + assert brc.fit(X).subcluster_centers_.dtype == global_dtype + + +def test_both_subclusters_updated(): + """Check that both subclusters are updated when a node a split, even when there are + duplicated data points. Non-regression test for #23269. + """ + + X = np.array( + [ + [-2.6192791, -1.5053215], + [-2.9993038, -1.6863596], + [-2.3724914, -1.3438171], + [-2.336792, -1.3417323], + [-2.4089134, -1.3290224], + [-2.3724914, -1.3438171], + [-3.364009, -1.8846745], + [-2.3724914, -1.3438171], + [-2.617677, -1.5003285], + [-2.2960556, -1.3260119], + [-2.3724914, -1.3438171], + [-2.5459878, -1.4533926], + [-2.25979, -1.3003055], + [-2.4089134, -1.3290224], + [-2.3724914, -1.3438171], + [-2.4089134, -1.3290224], + [-2.5459878, -1.4533926], + [-2.3724914, -1.3438171], + [-2.9720619, -1.7058647], + [-2.336792, -1.3417323], + [-2.3724914, -1.3438171], + ], + dtype=np.float32, + ) + + # no error + Birch(branching_factor=5, threshold=1e-5, n_clusters=None).fit(X) diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_bisect_k_means.py b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_bisect_k_means.py new file mode 100644 index 0000000000000000000000000000000000000000..799ddbc086ce0a14397fe5cb4aef607903c01228 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_bisect_k_means.py @@ -0,0 +1,158 @@ +import numpy as np +import pytest + +from sklearn.cluster import BisectingKMeans +from sklearn.metrics import v_measure_score +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.fixes import CSR_CONTAINERS + + +@pytest.mark.parametrize("bisecting_strategy", ["biggest_inertia", "largest_cluster"]) +@pytest.mark.parametrize("init", ["k-means++", "random"]) +def test_three_clusters(bisecting_strategy, init): + """Tries to perform bisect k-means for three clusters to check + if splitting data is performed correctly. 
+ """ + X = np.array( + [[1, 1], [10, 1], [3, 1], [10, 0], [2, 1], [10, 2], [10, 8], [10, 9], [10, 10]] + ) + bisect_means = BisectingKMeans( + n_clusters=3, + random_state=0, + bisecting_strategy=bisecting_strategy, + init=init, + ) + bisect_means.fit(X) + + expected_centers = [[2, 1], [10, 1], [10, 9]] + expected_labels = [0, 1, 0, 1, 0, 1, 2, 2, 2] + + assert_allclose( + sorted(expected_centers), sorted(bisect_means.cluster_centers_.tolist()) + ) + assert_allclose(v_measure_score(expected_labels, bisect_means.labels_), 1.0) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse(csr_container): + """Test Bisecting K-Means with sparse data. + + Checks if labels and centers are the same between dense and sparse. + """ + + rng = np.random.RandomState(0) + + X = rng.rand(20, 2) + X[X < 0.8] = 0 + X_csr = csr_container(X) + + bisect_means = BisectingKMeans(n_clusters=3, random_state=0) + + bisect_means.fit(X_csr) + sparse_centers = bisect_means.cluster_centers_ + + bisect_means.fit(X) + normal_centers = bisect_means.cluster_centers_ + + # Check if results is the same for dense and sparse data + assert_allclose(normal_centers, sparse_centers, atol=1e-8) + + +@pytest.mark.parametrize("n_clusters", [4, 5]) +def test_n_clusters(n_clusters): + """Test if resulting labels are in range [0, n_clusters - 1].""" + + rng = np.random.RandomState(0) + X = rng.rand(10, 2) + + bisect_means = BisectingKMeans(n_clusters=n_clusters, random_state=0) + bisect_means.fit(X) + + assert_array_equal(np.unique(bisect_means.labels_), np.arange(n_clusters)) + + +def test_one_cluster(): + """Test single cluster.""" + + X = np.array([[1, 2], [10, 2], [10, 8]]) + + bisect_means = BisectingKMeans(n_clusters=1, random_state=0).fit(X) + + # All labels from fit or predict should be equal 0 + assert all(bisect_means.labels_ == 0) + assert all(bisect_means.predict(X) == 0) + + assert_allclose(bisect_means.cluster_centers_, X.mean(axis=0).reshape(1, -1)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None]) +def test_fit_predict(csr_container): + """Check if labels from fit(X) method are same as from fit(X).predict(X).""" + rng = np.random.RandomState(0) + + X = rng.rand(10, 2) + + if csr_container is not None: + X[X < 0.8] = 0 + X = csr_container(X) + + bisect_means = BisectingKMeans(n_clusters=3, random_state=0) + bisect_means.fit(X) + + assert_array_equal(bisect_means.labels_, bisect_means.predict(X)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None]) +def test_dtype_preserved(csr_container, global_dtype): + """Check that centers dtype is the same as input data dtype.""" + rng = np.random.RandomState(0) + X = rng.rand(10, 2).astype(global_dtype, copy=False) + + if csr_container is not None: + X[X < 0.8] = 0 + X = csr_container(X) + + km = BisectingKMeans(n_clusters=3, random_state=0) + km.fit(X) + + assert km.cluster_centers_.dtype == global_dtype + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None]) +def test_float32_float64_equivalence(csr_container): + """Check that the results are the same between float32 and float64.""" + rng = np.random.RandomState(0) + X = rng.rand(10, 2) + + if csr_container is not None: + X[X < 0.8] = 0 + X = csr_container(X) + + km64 = BisectingKMeans(n_clusters=3, random_state=0).fit(X) + km32 = BisectingKMeans(n_clusters=3, random_state=0).fit(X.astype(np.float32)) + + assert_allclose(km32.cluster_centers_, km64.cluster_centers_) + assert_array_equal(km32.labels_, km64.labels_) + + 
+@pytest.mark.parametrize("algorithm", ("lloyd", "elkan")) +def test_no_crash_on_empty_bisections(algorithm): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/27081 + rng = np.random.RandomState(0) + X_train = rng.rand(3000, 10) + bkm = BisectingKMeans(n_clusters=10, algorithm=algorithm).fit(X_train) + + # predict on scaled data to trigger pathologic case + # where the inner mask leads to empty bisections. + X_test = 50 * rng.rand(100, 10) + labels = bkm.predict(X_test) # should not crash with idiv by 0 + assert np.isin(np.unique(labels), np.arange(10)).all() + + +def test_one_feature(): + # Check that no error is raised when there is only one feature + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/27236 + X = np.random.normal(size=(128, 1)) + BisectingKMeans(bisecting_strategy="biggest_inertia", random_state=0).fit(X) diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_dbscan.py b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_dbscan.py new file mode 100644 index 0000000000000000000000000000000000000000..d42cc2b17d518515c31f8420e96db7f2ef05b4d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_dbscan.py @@ -0,0 +1,434 @@ +""" +Tests for DBSCAN clustering algorithm +""" + +import pickle +import warnings + +import numpy as np +import pytest +from scipy.spatial import distance + +from sklearn.cluster import DBSCAN, dbscan +from sklearn.cluster.tests.common import generate_clustered_data +from sklearn.metrics.pairwise import pairwise_distances +from sklearn.neighbors import NearestNeighbors +from sklearn.utils._testing import assert_array_equal +from sklearn.utils.fixes import CSR_CONTAINERS, LIL_CONTAINERS + +n_clusters = 3 +X = generate_clustered_data(n_clusters=n_clusters) + + +def test_dbscan_similarity(): + # Tests the DBSCAN algorithm with a similarity array. + # Parameters chosen specifically for this task. + eps = 0.15 + min_samples = 10 + # Compute similarities + D = distance.squareform(distance.pdist(X)) + D /= np.max(D) + # Compute DBSCAN + core_samples, labels = dbscan( + D, metric="precomputed", eps=eps, min_samples=min_samples + ) + # number of clusters, ignoring noise if present + n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0) + + assert n_clusters_1 == n_clusters + + db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples) + labels = db.fit(D).labels_ + + n_clusters_2 = len(set(labels)) - int(-1 in labels) + assert n_clusters_2 == n_clusters + + +def test_dbscan_feature(): + # Tests the DBSCAN algorithm with a feature vector array. + # Parameters chosen specifically for this task. + # Different eps to other test, because distance is not normalised. 
+ eps = 0.8 + min_samples = 10 + metric = "euclidean" + # Compute DBSCAN + # parameters chosen for task + core_samples, labels = dbscan(X, metric=metric, eps=eps, min_samples=min_samples) + + # number of clusters, ignoring noise if present + n_clusters_1 = len(set(labels)) - int(-1 in labels) + assert n_clusters_1 == n_clusters + + db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples) + labels = db.fit(X).labels_ + + n_clusters_2 = len(set(labels)) - int(-1 in labels) + assert n_clusters_2 == n_clusters + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_dbscan_sparse(lil_container): + core_sparse, labels_sparse = dbscan(lil_container(X), eps=0.8, min_samples=10) + core_dense, labels_dense = dbscan(X, eps=0.8, min_samples=10) + assert_array_equal(core_dense, core_sparse) + assert_array_equal(labels_dense, labels_sparse) + + +@pytest.mark.parametrize("include_self", [False, True]) +def test_dbscan_sparse_precomputed(include_self): + D = pairwise_distances(X) + nn = NearestNeighbors(radius=0.9).fit(X) + X_ = X if include_self else None + D_sparse = nn.radius_neighbors_graph(X=X_, mode="distance") + # Ensure it is sparse not merely on diagonals: + assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1) + core_sparse, labels_sparse = dbscan( + D_sparse, eps=0.8, min_samples=10, metric="precomputed" + ) + core_dense, labels_dense = dbscan(D, eps=0.8, min_samples=10, metric="precomputed") + assert_array_equal(core_dense, core_sparse) + assert_array_equal(labels_dense, labels_sparse) + + +def test_dbscan_sparse_precomputed_different_eps(): + # test that precomputed neighbors graph is filtered if computed with + # a radius larger than DBSCAN's eps. + lower_eps = 0.2 + nn = NearestNeighbors(radius=lower_eps).fit(X) + D_sparse = nn.radius_neighbors_graph(X, mode="distance") + dbscan_lower = dbscan(D_sparse, eps=lower_eps, metric="precomputed") + + higher_eps = lower_eps + 0.7 + nn = NearestNeighbors(radius=higher_eps).fit(X) + D_sparse = nn.radius_neighbors_graph(X, mode="distance") + dbscan_higher = dbscan(D_sparse, eps=lower_eps, metric="precomputed") + + assert_array_equal(dbscan_lower[0], dbscan_higher[0]) + assert_array_equal(dbscan_lower[1], dbscan_higher[1]) + + +@pytest.mark.parametrize("metric", ["precomputed", "minkowski"]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None]) +def test_dbscan_input_not_modified(metric, csr_container): + # test that the input is not modified by dbscan + X = np.random.RandomState(0).rand(10, 10) + X = csr_container(X) if csr_container is not None else X + X_copy = X.copy() + dbscan(X, metric=metric) + + if csr_container is not None: + assert_array_equal(X.toarray(), X_copy.toarray()) + else: + assert_array_equal(X, X_copy) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_dbscan_input_not_modified_precomputed_sparse_nodiag(csr_container): + """Check that we don't modify in-place the pre-computed sparse matrix. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27508 + """ + X = np.random.RandomState(0).rand(10, 10) + # Add zeros on the diagonal that will be implicit when creating + # the sparse matrix. If `X` is modified in-place, the zeros from + # the diagonal will be made explicit. + np.fill_diagonal(X, 0) + X = csr_container(X) + assert all(row != col for row, col in zip(*X.nonzero())) + X_copy = X.copy() + dbscan(X, metric="precomputed") + # Make sure that we did not modify `X` in-place even by creating + # explicit 0s values. 
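+ # Same number of stored entries and identical dense content as before the call.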
+ assert X.nnz == X_copy.nnz + assert_array_equal(X.toarray(), X_copy.toarray()) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_dbscan_no_core_samples(csr_container): + rng = np.random.RandomState(0) + X = rng.rand(40, 10) + X[X < 0.8] = 0 + + for X_ in [X, csr_container(X)]: + db = DBSCAN(min_samples=6).fit(X_) + assert_array_equal(db.components_, np.empty((0, X_.shape[1]))) + assert_array_equal(db.labels_, -1) + assert db.core_sample_indices_.shape == (0,) + + +def test_dbscan_callable(): + # Tests the DBSCAN algorithm with a callable metric. + # Parameters chosen specifically for this task. + # Different eps to other test, because distance is not normalised. + eps = 0.8 + min_samples = 10 + # metric is the function reference, not the string key. + metric = distance.euclidean + # Compute DBSCAN + # parameters chosen for task + core_samples, labels = dbscan( + X, metric=metric, eps=eps, min_samples=min_samples, algorithm="ball_tree" + ) + + # number of clusters, ignoring noise if present + n_clusters_1 = len(set(labels)) - int(-1 in labels) + assert n_clusters_1 == n_clusters + + db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples, algorithm="ball_tree") + labels = db.fit(X).labels_ + + n_clusters_2 = len(set(labels)) - int(-1 in labels) + assert n_clusters_2 == n_clusters + + +def test_dbscan_metric_params(): + # Tests that DBSCAN works with the metrics_params argument. + eps = 0.8 + min_samples = 10 + p = 1 + + # Compute DBSCAN with metric_params arg + + with warnings.catch_warnings(record=True) as warns: + db = DBSCAN( + metric="minkowski", + metric_params={"p": p}, + eps=eps, + p=None, + min_samples=min_samples, + algorithm="ball_tree", + ).fit(X) + assert not warns, warns[0].message + core_sample_1, labels_1 = db.core_sample_indices_, db.labels_ + + # Test that sample labels are the same as passing Minkowski 'p' directly + db = DBSCAN( + metric="minkowski", eps=eps, min_samples=min_samples, algorithm="ball_tree", p=p + ).fit(X) + core_sample_2, labels_2 = db.core_sample_indices_, db.labels_ + + assert_array_equal(core_sample_1, core_sample_2) + assert_array_equal(labels_1, labels_2) + + # Minkowski with p=1 should be equivalent to Manhattan distance + db = DBSCAN( + metric="manhattan", eps=eps, min_samples=min_samples, algorithm="ball_tree" + ).fit(X) + core_sample_3, labels_3 = db.core_sample_indices_, db.labels_ + + assert_array_equal(core_sample_1, core_sample_3) + assert_array_equal(labels_1, labels_3) + + with pytest.warns( + SyntaxWarning, + match=( + "Parameter p is found in metric_params. " + "The corresponding parameter from __init__ " + "is ignored." + ), + ): + # Test that checks p is ignored in favor of metric_params={'p': } + db = DBSCAN( + metric="minkowski", + metric_params={"p": p}, + eps=eps, + p=p + 1, + min_samples=min_samples, + algorithm="ball_tree", + ).fit(X) + core_sample_4, labels_4 = db.core_sample_indices_, db.labels_ + + assert_array_equal(core_sample_1, core_sample_4) + assert_array_equal(labels_1, labels_4) + + +def test_dbscan_balltree(): + # Tests the DBSCAN algorithm with balltree for neighbor calculation. 
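+ # Precomputed distances, ball_tree and kd_tree should all find the expected number of clusters.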
+ eps = 0.8 + min_samples = 10 + + D = pairwise_distances(X) + core_samples, labels = dbscan( + D, metric="precomputed", eps=eps, min_samples=min_samples + ) + + # number of clusters, ignoring noise if present + n_clusters_1 = len(set(labels)) - int(-1 in labels) + assert n_clusters_1 == n_clusters + + db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm="ball_tree") + labels = db.fit(X).labels_ + + n_clusters_2 = len(set(labels)) - int(-1 in labels) + assert n_clusters_2 == n_clusters + + db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm="kd_tree") + labels = db.fit(X).labels_ + + n_clusters_3 = len(set(labels)) - int(-1 in labels) + assert n_clusters_3 == n_clusters + + db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm="ball_tree") + labels = db.fit(X).labels_ + + n_clusters_4 = len(set(labels)) - int(-1 in labels) + assert n_clusters_4 == n_clusters + + db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples, algorithm="ball_tree") + labels = db.fit(X).labels_ + + n_clusters_5 = len(set(labels)) - int(-1 in labels) + assert n_clusters_5 == n_clusters + + +def test_input_validation(): + # DBSCAN.fit should accept a list of lists. + X = [[1.0, 2.0], [3.0, 4.0]] + DBSCAN().fit(X) # must not raise exception + + +def test_pickle(): + obj = DBSCAN() + s = pickle.dumps(obj) + assert type(pickle.loads(s)) == obj.__class__ + + +def test_boundaries(): + # ensure min_samples is inclusive of core point + core, _ = dbscan([[0], [1]], eps=2, min_samples=2) + assert 0 in core + # ensure eps is inclusive of circumference + core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2) + assert 0 in core + core, _ = dbscan([[0], [1], [1]], eps=0.99, min_samples=2) + assert 0 not in core + + +def test_weighted_dbscan(global_random_seed): + # ensure sample_weight is validated + with pytest.raises(ValueError): + dbscan([[0], [1]], sample_weight=[2]) + with pytest.raises(ValueError): + dbscan([[0], [1]], sample_weight=[2, 3, 4]) + + # ensure sample_weight has an effect + assert_array_equal([], dbscan([[0], [1]], sample_weight=None, min_samples=6)[0]) + assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5], min_samples=6)[0]) + assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5], min_samples=6)[0]) + assert_array_equal( + [0, 1], dbscan([[0], [1]], sample_weight=[6, 6], min_samples=6)[0] + ) + + # points within eps of each other: + assert_array_equal( + [0, 1], dbscan([[0], [1]], eps=1.5, sample_weight=[5, 1], min_samples=6)[0] + ) + # and effect of non-positive and non-integer sample_weight: + assert_array_equal( + [], dbscan([[0], [1]], sample_weight=[5, 0], eps=1.5, min_samples=6)[0] + ) + assert_array_equal( + [0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1], eps=1.5, min_samples=6)[0] + ) + assert_array_equal( + [0, 1], dbscan([[0], [1]], sample_weight=[6, 0], eps=1.5, min_samples=6)[0] + ) + assert_array_equal( + [], dbscan([[0], [1]], sample_weight=[6, -1], eps=1.5, min_samples=6)[0] + ) + + # for non-negative sample_weight, cores should be identical to repetition + rng = np.random.RandomState(global_random_seed) + sample_weight = rng.randint(0, 5, X.shape[0]) + core1, label1 = dbscan(X, sample_weight=sample_weight) + assert len(label1) == len(X) + + X_repeated = np.repeat(X, sample_weight, axis=0) + core_repeated, label_repeated = dbscan(X_repeated) + core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool) + core_repeated_mask[core_repeated] = True + core_mask = np.zeros(X.shape[0], dtype=bool) + core_mask[core1] = True + 
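+ # Expanding the core mask by each sample's weight must reproduce the core mask of the repeated data.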
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask) + + # sample_weight should work with precomputed distance matrix + D = pairwise_distances(X) + core3, label3 = dbscan(D, sample_weight=sample_weight, metric="precomputed") + assert_array_equal(core1, core3) + assert_array_equal(label1, label3) + + # sample_weight should work with estimator + est = DBSCAN().fit(X, sample_weight=sample_weight) + core4 = est.core_sample_indices_ + label4 = est.labels_ + assert_array_equal(core1, core4) + assert_array_equal(label1, label4) + + est = DBSCAN() + label5 = est.fit_predict(X, sample_weight=sample_weight) + core5 = est.core_sample_indices_ + assert_array_equal(core1, core5) + assert_array_equal(label1, label5) + assert_array_equal(label1, est.labels_) + + +@pytest.mark.parametrize("algorithm", ["brute", "kd_tree", "ball_tree"]) +def test_dbscan_core_samples_toy(algorithm): + X = [[0], [2], [3], [4], [6], [8], [10]] + n_samples = len(X) + + # Degenerate case: every sample is a core sample, either with its own + # cluster or including other close core samples. + core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=1) + assert_array_equal(core_samples, np.arange(n_samples)) + assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4]) + + # With eps=1 and min_samples=2 only the 3 samples from the denser area + # are core samples. All other points are isolated and considered noise. + core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=2) + assert_array_equal(core_samples, [1, 2, 3]) + assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1]) + + # Only the sample in the middle of the dense area is core. Its two + # neighbors are edge samples. Remaining samples are noise. + core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=3) + assert_array_equal(core_samples, [2]) + assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1]) + + # It's no longer possible to extract core samples with eps=1: + # everything is noise. 
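+ # min_samples=4 exceeds every neighborhood size here, so all points are labelled -1.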
+ core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=4) + assert_array_equal(core_samples, []) + assert_array_equal(labels, np.full(n_samples, -1.0)) + + +def test_dbscan_precomputed_metric_with_degenerate_input_arrays(): + # see https://github.com/scikit-learn/scikit-learn/issues/4641 for + # more details + X = np.eye(10) + labels = DBSCAN(eps=0.5, metric="precomputed").fit(X).labels_ + assert len(set(labels)) == 1 + + X = np.zeros((10, 10)) + labels = DBSCAN(eps=0.5, metric="precomputed").fit(X).labels_ + assert len(set(labels)) == 1 + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_dbscan_precomputed_metric_with_initial_rows_zero(csr_container): + # sample matrix with initial two row all zero + ar = np.array( + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0], + [0.0, 0.0, 0.1, 0.1, 0.0, 0.0, 0.3], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1], + [0.0, 0.0, 0.0, 0.0, 0.3, 0.1, 0.0], + ] + ) + matrix = csr_container(ar) + labels = DBSCAN(eps=0.2, metric="precomputed", min_samples=2).fit(matrix).labels_ + assert_array_equal(labels, [-1, -1, 0, 0, 0, 1, 1]) diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_feature_agglomeration.py b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_feature_agglomeration.py new file mode 100644 index 0000000000000000000000000000000000000000..121e8f2cfe400c1fb5e6608cc0e120b4d1a36a1d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_feature_agglomeration.py @@ -0,0 +1,80 @@ +""" +Tests for sklearn.cluster._feature_agglomeration +""" +# Authors: Sergul Aydore 2017 +import warnings + +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn.cluster import FeatureAgglomeration +from sklearn.datasets import make_blobs +from sklearn.utils._testing import assert_array_almost_equal + + +def test_feature_agglomeration(): + n_clusters = 1 + X = np.array([0, 0, 1]).reshape(1, 3) # (n_samples, n_features) + + agglo_mean = FeatureAgglomeration(n_clusters=n_clusters, pooling_func=np.mean) + agglo_median = FeatureAgglomeration(n_clusters=n_clusters, pooling_func=np.median) + agglo_mean.fit(X) + agglo_median.fit(X) + + assert np.size(np.unique(agglo_mean.labels_)) == n_clusters + assert np.size(np.unique(agglo_median.labels_)) == n_clusters + assert np.size(agglo_mean.labels_) == X.shape[1] + assert np.size(agglo_median.labels_) == X.shape[1] + + # Test transform + Xt_mean = agglo_mean.transform(X) + Xt_median = agglo_median.transform(X) + assert Xt_mean.shape[1] == n_clusters + assert Xt_median.shape[1] == n_clusters + assert Xt_mean == np.array([1 / 3.0]) + assert Xt_median == np.array([0.0]) + + # Test inverse transform + X_full_mean = agglo_mean.inverse_transform(Xt_mean) + X_full_median = agglo_median.inverse_transform(Xt_median) + assert np.unique(X_full_mean[0]).size == n_clusters + assert np.unique(X_full_median[0]).size == n_clusters + + assert_array_almost_equal(agglo_mean.transform(X_full_mean), Xt_mean) + assert_array_almost_equal(agglo_median.transform(X_full_median), Xt_median) + + +def test_feature_agglomeration_feature_names_out(): + """Check `get_feature_names_out` for `FeatureAgglomeration`.""" + X, _ = make_blobs(n_features=6, random_state=0) + agglo = FeatureAgglomeration(n_clusters=3) + agglo.fit(X) + n_clusters = agglo.n_clusters_ + + names_out = agglo.get_feature_names_out() + assert_array_equal( + 
[f"featureagglomeration{i}" for i in range(n_clusters)], names_out + ) + + +# TODO(1.5): remove this test +def test_inverse_transform_Xred_deprecation(): + X = np.array([0, 0, 1]).reshape(1, 3) # (n_samples, n_features) + + est = FeatureAgglomeration(n_clusters=1, pooling_func=np.mean) + est.fit(X) + Xt = est.transform(X) + + with pytest.raises(TypeError, match="Missing required positional argument"): + est.inverse_transform() + + with pytest.raises(ValueError, match="Please provide only"): + est.inverse_transform(Xt=Xt, Xred=Xt) + + with warnings.catch_warnings(record=True): + warnings.simplefilter("error") + est.inverse_transform(Xt) + + with pytest.warns(FutureWarning, match="Input argument `Xred` was renamed to `Xt`"): + est.inverse_transform(Xred=Xt) diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_hdbscan.py b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_hdbscan.py new file mode 100644 index 0000000000000000000000000000000000000000..6db2d4387de181358d0ec64a98035cc1293cfa3e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_hdbscan.py @@ -0,0 +1,581 @@ +""" +Tests for HDBSCAN clustering algorithm +Based on the DBSCAN test code +""" +import numpy as np +import pytest +from scipy import stats +from scipy.spatial import distance + +from sklearn.cluster import HDBSCAN +from sklearn.cluster._hdbscan._tree import ( + CONDENSED_dtype, + _condense_tree, + _do_labelling, +) +from sklearn.cluster._hdbscan.hdbscan import _OUTLIER_ENCODING +from sklearn.datasets import make_blobs +from sklearn.metrics import fowlkes_mallows_score +from sklearn.metrics.pairwise import _VALID_METRICS, euclidean_distances +from sklearn.neighbors import BallTree, KDTree +from sklearn.preprocessing import StandardScaler +from sklearn.utils import shuffle +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + +X, y = make_blobs(n_samples=200, random_state=10) +X, y = shuffle(X, y, random_state=7) +X = StandardScaler().fit_transform(X) + +ALGORITHMS = [ + "kd_tree", + "ball_tree", + "brute", + "auto", +] + +OUTLIER_SET = {-1} | {out["label"] for _, out in _OUTLIER_ENCODING.items()} + + +def check_label_quality(labels, threshold=0.99): + n_clusters = len(set(labels) - OUTLIER_SET) + assert n_clusters == 3 + assert fowlkes_mallows_score(labels, y) > threshold + + +@pytest.mark.parametrize("outlier_type", _OUTLIER_ENCODING) +def test_outlier_data(outlier_type): + """ + Tests if np.inf and np.nan data are each treated as special outliers. 
+ """ + outlier = { + "infinite": np.inf, + "missing": np.nan, + }[outlier_type] + prob_check = { + "infinite": lambda x, y: x == y, + "missing": lambda x, y: np.isnan(x), + }[outlier_type] + label = _OUTLIER_ENCODING[outlier_type]["label"] + prob = _OUTLIER_ENCODING[outlier_type]["prob"] + + X_outlier = X.copy() + X_outlier[0] = [outlier, 1] + X_outlier[5] = [outlier, outlier] + model = HDBSCAN().fit(X_outlier) + + (missing_labels_idx,) = (model.labels_ == label).nonzero() + assert_array_equal(missing_labels_idx, [0, 5]) + + (missing_probs_idx,) = (prob_check(model.probabilities_, prob)).nonzero() + assert_array_equal(missing_probs_idx, [0, 5]) + + clean_indices = list(range(1, 5)) + list(range(6, 200)) + clean_model = HDBSCAN().fit(X_outlier[clean_indices]) + assert_array_equal(clean_model.labels_, model.labels_[clean_indices]) + + +def test_hdbscan_distance_matrix(): + """ + Tests that HDBSCAN works with precomputed distance matrices, and throws the + appropriate errors when needed. + """ + D = euclidean_distances(X) + D_original = D.copy() + labels = HDBSCAN(metric="precomputed", copy=True).fit_predict(D) + + assert_allclose(D, D_original) + check_label_quality(labels) + + msg = r"The precomputed distance matrix.*has shape" + with pytest.raises(ValueError, match=msg): + HDBSCAN(metric="precomputed", copy=True).fit_predict(X) + + msg = r"The precomputed distance matrix.*values" + # Ensure the matrix is not symmetric + D[0, 1] = 10 + D[1, 0] = 1 + with pytest.raises(ValueError, match=msg): + HDBSCAN(metric="precomputed").fit_predict(D) + + +@pytest.mark.parametrize("sparse_constructor", [*CSR_CONTAINERS, *CSC_CONTAINERS]) +def test_hdbscan_sparse_distance_matrix(sparse_constructor): + """ + Tests that HDBSCAN works with sparse distance matrices. + """ + D = distance.squareform(distance.pdist(X)) + D /= np.max(D) + + threshold = stats.scoreatpercentile(D.flatten(), 50) + + D[D >= threshold] = 0.0 + D = sparse_constructor(D) + D.eliminate_zeros() + + labels = HDBSCAN(metric="precomputed").fit_predict(D) + check_label_quality(labels) + + +def test_hdbscan_feature_array(): + """ + Tests that HDBSCAN works with feature array, including an arbitrary + goodness of fit check. Note that the check is a simple heuristic. + """ + labels = HDBSCAN().fit_predict(X) + + # Check that clustering is arbitrarily good + # This is a heuristic to guard against regression + check_label_quality(labels) + + +@pytest.mark.parametrize("algo", ALGORITHMS) +@pytest.mark.parametrize("metric", _VALID_METRICS) +def test_hdbscan_algorithms(algo, metric): + """ + Tests that HDBSCAN works with the expected combinations of algorithms and + metrics, or raises the expected errors. 
+ """ + labels = HDBSCAN(algorithm=algo).fit_predict(X) + check_label_quality(labels) + + # Validation for brute is handled by `pairwise_distances` + if algo in ("brute", "auto"): + return + + ALGOS_TREES = { + "kd_tree": KDTree, + "ball_tree": BallTree, + } + metric_params = { + "mahalanobis": {"V": np.eye(X.shape[1])}, + "seuclidean": {"V": np.ones(X.shape[1])}, + "minkowski": {"p": 2}, + "wminkowski": {"p": 2, "w": np.ones(X.shape[1])}, + }.get(metric, None) + + hdb = HDBSCAN( + algorithm=algo, + metric=metric, + metric_params=metric_params, + ) + + if metric not in ALGOS_TREES[algo].valid_metrics: + with pytest.raises(ValueError): + hdb.fit(X) + elif metric == "wminkowski": + with pytest.warns(FutureWarning): + hdb.fit(X) + else: + hdb.fit(X) + + +def test_dbscan_clustering(): + """ + Tests that HDBSCAN can generate a sufficiently accurate dbscan clustering. + This test is more of a sanity check than a rigorous evaluation. + """ + clusterer = HDBSCAN().fit(X) + labels = clusterer.dbscan_clustering(0.3) + + # We use a looser threshold due to dbscan producing a more constrained + # clustering representation + check_label_quality(labels, threshold=0.92) + + +@pytest.mark.parametrize("cut_distance", (0.1, 0.5, 1)) +def test_dbscan_clustering_outlier_data(cut_distance): + """ + Tests if np.inf and np.nan data are each treated as special outliers. + """ + missing_label = _OUTLIER_ENCODING["missing"]["label"] + infinite_label = _OUTLIER_ENCODING["infinite"]["label"] + + X_outlier = X.copy() + X_outlier[0] = [np.inf, 1] + X_outlier[2] = [1, np.nan] + X_outlier[5] = [np.inf, np.nan] + model = HDBSCAN().fit(X_outlier) + labels = model.dbscan_clustering(cut_distance=cut_distance) + + missing_labels_idx = np.flatnonzero(labels == missing_label) + assert_array_equal(missing_labels_idx, [2, 5]) + + infinite_labels_idx = np.flatnonzero(labels == infinite_label) + assert_array_equal(infinite_labels_idx, [0]) + + clean_idx = list(set(range(200)) - set(missing_labels_idx + infinite_labels_idx)) + clean_model = HDBSCAN().fit(X_outlier[clean_idx]) + clean_labels = clean_model.dbscan_clustering(cut_distance=cut_distance) + assert_array_equal(clean_labels, labels[clean_idx]) + + +def test_hdbscan_best_balltree_metric(): + """ + Tests that HDBSCAN using `BallTree` works. + """ + labels = HDBSCAN( + metric="seuclidean", metric_params={"V": np.ones(X.shape[1])} + ).fit_predict(X) + check_label_quality(labels) + + +def test_hdbscan_no_clusters(): + """ + Tests that HDBSCAN correctly does not generate a valid cluster when the + `min_cluster_size` is too large for the data. + """ + labels = HDBSCAN(min_cluster_size=len(X) - 1).fit_predict(X) + assert set(labels).issubset(OUTLIER_SET) + + +def test_hdbscan_min_cluster_size(): + """ + Test that the smallest non-noise cluster has at least `min_cluster_size` + many points + """ + for min_cluster_size in range(2, len(X), 1): + labels = HDBSCAN(min_cluster_size=min_cluster_size).fit_predict(X) + true_labels = [label for label in labels if label != -1] + if len(true_labels) != 0: + assert np.min(np.bincount(true_labels)) >= min_cluster_size + + +def test_hdbscan_callable_metric(): + """ + Tests that HDBSCAN works when passed a callable metric. 
+ """ + metric = distance.euclidean + labels = HDBSCAN(metric=metric).fit_predict(X) + check_label_quality(labels) + + +@pytest.mark.parametrize("tree", ["kd_tree", "ball_tree"]) +def test_hdbscan_precomputed_non_brute(tree): + """ + Tests that HDBSCAN correctly raises an error when passing precomputed data + while requesting a tree-based algorithm. + """ + hdb = HDBSCAN(metric="precomputed", algorithm=tree) + msg = "precomputed is not a valid metric for" + with pytest.raises(ValueError, match=msg): + hdb.fit(X) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_hdbscan_sparse(csr_container): + """ + Tests that HDBSCAN works correctly when passing sparse feature data. + Evaluates correctness by comparing against the same data passed as a dense + array. + """ + + dense_labels = HDBSCAN().fit(X).labels_ + check_label_quality(dense_labels) + + _X_sparse = csr_container(X) + X_sparse = _X_sparse.copy() + sparse_labels = HDBSCAN().fit(X_sparse).labels_ + assert_array_equal(dense_labels, sparse_labels) + + # Compare that the sparse and dense non-precomputed routines return the same labels + # where the 0th observation contains the outlier. + for outlier_val, outlier_type in ((np.inf, "infinite"), (np.nan, "missing")): + X_dense = X.copy() + X_dense[0, 0] = outlier_val + dense_labels = HDBSCAN().fit(X_dense).labels_ + check_label_quality(dense_labels) + assert dense_labels[0] == _OUTLIER_ENCODING[outlier_type]["label"] + + X_sparse = _X_sparse.copy() + X_sparse[0, 0] = outlier_val + sparse_labels = HDBSCAN().fit(X_sparse).labels_ + assert_array_equal(dense_labels, sparse_labels) + + msg = "Sparse data matrices only support algorithm `brute`." + with pytest.raises(ValueError, match=msg): + HDBSCAN(metric="euclidean", algorithm="ball_tree").fit(X_sparse) + + +@pytest.mark.parametrize("algorithm", ALGORITHMS) +def test_hdbscan_centers(algorithm): + """ + Tests that HDBSCAN centers are calculated and stored properly, and are + accurate to the data. + """ + centers = [(0.0, 0.0), (3.0, 3.0)] + H, _ = make_blobs(n_samples=2000, random_state=0, centers=centers, cluster_std=0.5) + hdb = HDBSCAN(store_centers="both").fit(H) + + for center, centroid, medoid in zip(centers, hdb.centroids_, hdb.medoids_): + assert_allclose(center, centroid, rtol=1, atol=0.05) + assert_allclose(center, medoid, rtol=1, atol=0.05) + + # Ensure that nothing is done for noise + hdb = HDBSCAN( + algorithm=algorithm, store_centers="both", min_cluster_size=X.shape[0] + ).fit(X) + assert hdb.centroids_.shape[0] == 0 + assert hdb.medoids_.shape[0] == 0 + + +def test_hdbscan_allow_single_cluster_with_epsilon(): + """ + Tests that HDBSCAN single-cluster selection with epsilon works correctly. + """ + rng = np.random.RandomState(0) + no_structure = rng.rand(150, 2) + # without epsilon we should see many noise points as children of root. + labels = HDBSCAN( + min_cluster_size=5, + cluster_selection_epsilon=0.0, + cluster_selection_method="eom", + allow_single_cluster=True, + ).fit_predict(no_structure) + unique_labels, counts = np.unique(labels, return_counts=True) + assert len(unique_labels) == 2 + + # Arbitrary heuristic. Would prefer something more precise. + assert counts[unique_labels == -1] > 30 + + # for this random seed an epsilon of 0.18 will produce exactly 2 noise + # points at that cut in single linkage. 
+ labels = HDBSCAN( + min_cluster_size=5, + cluster_selection_epsilon=0.18, + cluster_selection_method="eom", + allow_single_cluster=True, + algorithm="kd_tree", + ).fit_predict(no_structure) + unique_labels, counts = np.unique(labels, return_counts=True) + assert len(unique_labels) == 2 + assert counts[unique_labels == -1] == 2 + + +def test_hdbscan_better_than_dbscan(): + """ + Validate that HDBSCAN can properly cluster this difficult synthetic + dataset. Note that DBSCAN fails on this (see HDBSCAN plotting + example) + """ + centers = [[-0.85, -0.85], [-0.85, 0.85], [3, 3], [3, -3]] + X, y = make_blobs( + n_samples=750, + centers=centers, + cluster_std=[0.2, 0.35, 1.35, 1.35], + random_state=0, + ) + labels = HDBSCAN().fit(X).labels_ + + n_clusters = len(set(labels)) - int(-1 in labels) + assert n_clusters == 4 + fowlkes_mallows_score(labels, y) > 0.99 + + +@pytest.mark.parametrize( + "kwargs, X", + [ + ({"metric": "precomputed"}, np.array([[1, np.inf], [np.inf, 1]])), + ({"metric": "precomputed"}, [[1, 2], [2, 1]]), + ({}, [[1, 2], [3, 4]]), + ], +) +def test_hdbscan_usable_inputs(X, kwargs): + """ + Tests that HDBSCAN works correctly for array-likes and precomputed inputs + with non-finite points. + """ + HDBSCAN(min_samples=1, **kwargs).fit(X) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_hdbscan_sparse_distances_too_few_nonzero(csr_container): + """ + Tests that HDBSCAN raises the correct error when there are too few + non-zero distances. + """ + X = csr_container(np.zeros((10, 10))) + + msg = "There exists points with fewer than" + with pytest.raises(ValueError, match=msg): + HDBSCAN(metric="precomputed").fit(X) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_hdbscan_sparse_distances_disconnected_graph(csr_container): + """ + Tests that HDBSCAN raises the correct error when the distance matrix + has multiple connected components. + """ + # Create symmetric sparse matrix with 2 connected components + X = np.zeros((20, 20)) + X[:5, :5] = 1 + X[5:, 15:] = 1 + X = X + X.T + X = csr_container(X) + msg = "HDBSCAN cannot be perfomed on a disconnected graph" + with pytest.raises(ValueError, match=msg): + HDBSCAN(metric="precomputed").fit(X) + + +def test_hdbscan_tree_invalid_metric(): + """ + Tests that HDBSCAN correctly raises an error for invalid metric choices. + """ + metric_callable = lambda x: x + msg = ( + ".* is not a valid metric for a .*-based algorithm\\. Please select a different" + " metric\\." + ) + + # Callables are not supported for either + with pytest.raises(ValueError, match=msg): + HDBSCAN(algorithm="kd_tree", metric=metric_callable).fit(X) + with pytest.raises(ValueError, match=msg): + HDBSCAN(algorithm="ball_tree", metric=metric_callable).fit(X) + + # The set of valid metrics for KDTree at the time of writing this test is a + # strict subset of those supported in BallTree + metrics_not_kd = list(set(BallTree.valid_metrics) - set(KDTree.valid_metrics)) + if len(metrics_not_kd) > 0: + with pytest.raises(ValueError, match=msg): + HDBSCAN(algorithm="kd_tree", metric=metrics_not_kd[0]).fit(X) + + +def test_hdbscan_too_many_min_samples(): + """ + Tests that HDBSCAN correctly raises an error when setting `min_samples` + larger than the number of samples. 
+ """ + hdb = HDBSCAN(min_samples=len(X) + 1) + msg = r"min_samples (.*) must be at most" + with pytest.raises(ValueError, match=msg): + hdb.fit(X) + + +def test_hdbscan_precomputed_dense_nan(): + """ + Tests that HDBSCAN correctly raises an error when providing precomputed + distances with `np.nan` values. + """ + X_nan = X.copy() + X_nan[0, 0] = np.nan + msg = "np.nan values found in precomputed-dense" + hdb = HDBSCAN(metric="precomputed") + with pytest.raises(ValueError, match=msg): + hdb.fit(X_nan) + + +@pytest.mark.parametrize("allow_single_cluster", [True, False]) +@pytest.mark.parametrize("epsilon", [0, 0.1]) +def test_labelling_distinct(global_random_seed, allow_single_cluster, epsilon): + """ + Tests that the `_do_labelling` helper function correctly assigns labels. + """ + n_samples = 48 + X, y = make_blobs( + n_samples, + random_state=global_random_seed, + # Ensure the clusters are distinct with no overlap + centers=[ + [0, 0], + [10, 0], + [0, 10], + ], + ) + + est = HDBSCAN().fit(X) + condensed_tree = _condense_tree( + est._single_linkage_tree_, min_cluster_size=est.min_cluster_size + ) + clusters = {n_samples + 2, n_samples + 3, n_samples + 4} + cluster_label_map = {n_samples + 2: 0, n_samples + 3: 1, n_samples + 4: 2} + labels = _do_labelling( + condensed_tree=condensed_tree, + clusters=clusters, + cluster_label_map=cluster_label_map, + allow_single_cluster=allow_single_cluster, + cluster_selection_epsilon=epsilon, + ) + + first_with_label = {_y: np.where(y == _y)[0][0] for _y in list(set(y))} + y_to_labels = {_y: labels[first_with_label[_y]] for _y in list(set(y))} + aligned_target = np.vectorize(y_to_labels.get)(y) + assert_array_equal(labels, aligned_target) + + +def test_labelling_thresholding(): + """ + Tests that the `_do_labelling` helper function correctly thresholds the + incoming lambda values given various `cluster_selection_epsilon` values. + """ + n_samples = 5 + MAX_LAMBDA = 1.5 + condensed_tree = np.array( + [ + (5, 2, MAX_LAMBDA, 1), + (5, 1, 0.1, 1), + (5, 0, MAX_LAMBDA, 1), + (5, 3, 0.2, 1), + (5, 4, 0.3, 1), + ], + dtype=CONDENSED_dtype, + ) + labels = _do_labelling( + condensed_tree=condensed_tree, + clusters={n_samples}, + cluster_label_map={n_samples: 0, n_samples + 1: 1}, + allow_single_cluster=True, + cluster_selection_epsilon=1, + ) + num_noise = condensed_tree["value"] < 1 + assert sum(num_noise) == sum(labels == -1) + + labels = _do_labelling( + condensed_tree=condensed_tree, + clusters={n_samples}, + cluster_label_map={n_samples: 0, n_samples + 1: 1}, + allow_single_cluster=True, + cluster_selection_epsilon=0, + ) + # The threshold should be calculated per-sample based on the largest + # lambda of any simbling node. In this case, all points are siblings + # and the largest value is exactly MAX_LAMBDA. + num_noise = condensed_tree["value"] < MAX_LAMBDA + assert sum(num_noise) == sum(labels == -1) + + +# TODO(1.6): Remove +def test_hdbscan_warning_on_deprecated_algorithm_name(): + # Test that warning message is shown when algorithm='kdtree' + msg = ( + "`algorithm='kdtree'`has been deprecated in 1.4 and will be renamed" + " to'kd_tree'`in 1.6. To keep the past behaviour, set `algorithm='kd_tree'`." + ) + with pytest.warns(FutureWarning, match=msg): + HDBSCAN(algorithm="kdtree").fit(X) + + # Test that warning message is shown when algorithm='balltree' + msg = ( + "`algorithm='balltree'`has been deprecated in 1.4 and will be renamed" + " to'ball_tree'`in 1.6. To keep the past behaviour, set" + " `algorithm='ball_tree'`." 
+ ) + with pytest.warns(FutureWarning, match=msg): + HDBSCAN(algorithm="balltree").fit(X) + + +@pytest.mark.parametrize("store_centers", ["centroid", "medoid"]) +def test_hdbscan_error_precomputed_and_store_centers(store_centers): + """Check that we raise an error if the centers are requested together with + a precomputed input matrix. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27893 + """ + rng = np.random.RandomState(0) + X = rng.random((100, 2)) + X_dist = euclidean_distances(X) + err_msg = "Cannot store centers when using a precomputed distance matrix." + with pytest.raises(ValueError, match=err_msg): + HDBSCAN(metric="precomputed", store_centers=store_centers).fit(X_dist) diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_hierarchical.py b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_hierarchical.py new file mode 100644 index 0000000000000000000000000000000000000000..3c99dd50ea85f5273c30628d44ac95340d72dd43 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_hierarchical.py @@ -0,0 +1,899 @@ +""" +Several basic tests for hierarchical clustering procedures + +""" +# Authors: Vincent Michel, 2010, Gael Varoquaux 2012, +# Matteo Visconti di Oleggio Castello 2014 +# License: BSD 3 clause +import itertools +import shutil +from functools import partial +from tempfile import mkdtemp + +import numpy as np +import pytest +from scipy.cluster import hierarchy +from scipy.sparse.csgraph import connected_components + +from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration, ward_tree +from sklearn.cluster._agglomerative import ( + _TREE_BUILDERS, + _fix_connectivity, + _hc_cut, + linkage_tree, +) +from sklearn.cluster._hierarchical_fast import ( + average_merge, + max_merge, + mst_linkage_core, +) +from sklearn.datasets import make_circles, make_moons +from sklearn.feature_extraction.image import grid_to_graph +from sklearn.metrics import DistanceMetric +from sklearn.metrics.cluster import adjusted_rand_score, normalized_mutual_info_score +from sklearn.metrics.pairwise import ( + PAIRED_DISTANCES, + cosine_distances, + manhattan_distances, + pairwise_distances, +) +from sklearn.metrics.tests.test_dist_metrics import METRICS_DEFAULT_PARAMS +from sklearn.neighbors import kneighbors_graph +from sklearn.utils._fast_dict import IntFloatDict +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + create_memmap_backed_data, + ignore_warnings, +) +from sklearn.utils.fixes import LIL_CONTAINERS + + +def test_linkage_misc(): + # Misc tests on linkage + rng = np.random.RandomState(42) + X = rng.normal(size=(5, 5)) + + with pytest.raises(ValueError): + linkage_tree(X, linkage="foo") + + with pytest.raises(ValueError): + linkage_tree(X, connectivity=np.ones((4, 4))) + + # Smoke test FeatureAgglomeration + FeatureAgglomeration().fit(X) + + # test hierarchical clustering on a precomputed distances matrix + dis = cosine_distances(X) + + res = linkage_tree(dis, affinity="precomputed") + assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0]) + + # test hierarchical clustering on a precomputed distances matrix + res = linkage_tree(X, affinity=manhattan_distances) + assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0]) + + +def test_structured_linkage_tree(): + # Check that we obtain the correct solution for structured linkage trees. 
+ rng = np.random.RandomState(0) + mask = np.ones([10, 10], dtype=bool) + # Avoiding a mask with only 'True' entries + mask[4:7, 4:7] = 0 + X = rng.randn(50, 100) + connectivity = grid_to_graph(*mask.shape) + for tree_builder in _TREE_BUILDERS.values(): + children, n_components, n_leaves, parent = tree_builder( + X.T, connectivity=connectivity + ) + n_nodes = 2 * X.shape[1] - 1 + assert len(children) + n_leaves == n_nodes + # Check that ward_tree raises a ValueError with a connectivity matrix + # of the wrong shape + with pytest.raises(ValueError): + tree_builder(X.T, connectivity=np.ones((4, 4))) + # Check that fitting with no samples raises an error + with pytest.raises(ValueError): + tree_builder(X.T[:0], connectivity=connectivity) + + +def test_unstructured_linkage_tree(): + # Check that we obtain the correct solution for unstructured linkage trees. + rng = np.random.RandomState(0) + X = rng.randn(50, 100) + for this_X in (X, X[0]): + # With specified a number of clusters just for the sake of + # raising a warning and testing the warning code + with ignore_warnings(): + with pytest.warns(UserWarning): + children, n_nodes, n_leaves, parent = ward_tree(this_X.T, n_clusters=10) + n_nodes = 2 * X.shape[1] - 1 + assert len(children) + n_leaves == n_nodes + + for tree_builder in _TREE_BUILDERS.values(): + for this_X in (X, X[0]): + with ignore_warnings(): + with pytest.warns(UserWarning): + children, n_nodes, n_leaves, parent = tree_builder( + this_X.T, n_clusters=10 + ) + n_nodes = 2 * X.shape[1] - 1 + assert len(children) + n_leaves == n_nodes + + +def test_height_linkage_tree(): + # Check that the height of the results of linkage tree is sorted. + rng = np.random.RandomState(0) + mask = np.ones([10, 10], dtype=bool) + X = rng.randn(50, 100) + connectivity = grid_to_graph(*mask.shape) + for linkage_func in _TREE_BUILDERS.values(): + children, n_nodes, n_leaves, parent = linkage_func( + X.T, connectivity=connectivity + ) + n_nodes = 2 * X.shape[1] - 1 + assert len(children) + n_leaves == n_nodes + + +def test_zero_cosine_linkage_tree(): + # Check that zero vectors in X produce an error when + # 'cosine' affinity is used + X = np.array([[0, 1], [0, 0]]) + msg = "Cosine affinity cannot be used when X contains zero vectors" + with pytest.raises(ValueError, match=msg): + linkage_tree(X, affinity="cosine") + + +@pytest.mark.parametrize("n_clusters, distance_threshold", [(None, 0.5), (10, None)]) +@pytest.mark.parametrize("compute_distances", [True, False]) +@pytest.mark.parametrize("linkage", ["ward", "complete", "average", "single"]) +def test_agglomerative_clustering_distances( + n_clusters, compute_distances, distance_threshold, linkage +): + # Check that when `compute_distances` is True or `distance_threshold` is + # given, the fitted model has an attribute `distances_`. 
+ rng = np.random.RandomState(0) + mask = np.ones([10, 10], dtype=bool) + n_samples = 100 + X = rng.randn(n_samples, 50) + connectivity = grid_to_graph(*mask.shape) + + clustering = AgglomerativeClustering( + n_clusters=n_clusters, + connectivity=connectivity, + linkage=linkage, + distance_threshold=distance_threshold, + compute_distances=compute_distances, + ) + clustering.fit(X) + if compute_distances or (distance_threshold is not None): + assert hasattr(clustering, "distances_") + n_children = clustering.children_.shape[0] + n_nodes = n_children + 1 + assert clustering.distances_.shape == (n_nodes - 1,) + else: + assert not hasattr(clustering, "distances_") + + +@pytest.mark.parametrize("lil_container", LIL_CONTAINERS) +def test_agglomerative_clustering(global_random_seed, lil_container): + # Check that we obtain the correct number of clusters with + # agglomerative clustering. + rng = np.random.RandomState(global_random_seed) + mask = np.ones([10, 10], dtype=bool) + n_samples = 100 + X = rng.randn(n_samples, 50) + connectivity = grid_to_graph(*mask.shape) + for linkage in ("ward", "complete", "average", "single"): + clustering = AgglomerativeClustering( + n_clusters=10, connectivity=connectivity, linkage=linkage + ) + clustering.fit(X) + # test caching + try: + tempdir = mkdtemp() + clustering = AgglomerativeClustering( + n_clusters=10, + connectivity=connectivity, + memory=tempdir, + linkage=linkage, + ) + clustering.fit(X) + labels = clustering.labels_ + assert np.size(np.unique(labels)) == 10 + finally: + shutil.rmtree(tempdir) + # Turn caching off now + clustering = AgglomerativeClustering( + n_clusters=10, connectivity=connectivity, linkage=linkage + ) + # Check that we obtain the same solution with early-stopping of the + # tree building + clustering.compute_full_tree = False + clustering.fit(X) + assert_almost_equal(normalized_mutual_info_score(clustering.labels_, labels), 1) + clustering.connectivity = None + clustering.fit(X) + assert np.size(np.unique(clustering.labels_)) == 10 + # Check that we raise a TypeError on dense matrices + clustering = AgglomerativeClustering( + n_clusters=10, + connectivity=lil_container(connectivity.toarray()[:10, :10]), + linkage=linkage, + ) + with pytest.raises(ValueError): + clustering.fit(X) + + # Test that using ward with another metric than euclidean raises an + # exception + clustering = AgglomerativeClustering( + n_clusters=10, + connectivity=connectivity.toarray(), + metric="manhattan", + linkage="ward", + ) + with pytest.raises(ValueError): + clustering.fit(X) + + # Test using another metric than euclidean works with linkage complete + for metric in PAIRED_DISTANCES.keys(): + # Compare our (structured) implementation to scipy + clustering = AgglomerativeClustering( + n_clusters=10, + connectivity=np.ones((n_samples, n_samples)), + metric=metric, + linkage="complete", + ) + clustering.fit(X) + clustering2 = AgglomerativeClustering( + n_clusters=10, connectivity=None, metric=metric, linkage="complete" + ) + clustering2.fit(X) + assert_almost_equal( + normalized_mutual_info_score(clustering2.labels_, clustering.labels_), 1 + ) + + # Test that using a distance matrix (affinity = 'precomputed') has same + # results (with connectivity constraints) + clustering = AgglomerativeClustering( + n_clusters=10, connectivity=connectivity, linkage="complete" + ) + clustering.fit(X) + X_dist = pairwise_distances(X) + clustering2 = AgglomerativeClustering( + n_clusters=10, + connectivity=connectivity, + metric="precomputed", + linkage="complete", + ) + 
clustering2.fit(X_dist) + assert_array_equal(clustering.labels_, clustering2.labels_) + + +def test_agglomerative_clustering_memory_mapped(): + """AgglomerativeClustering must work on mem-mapped dataset. + + Non-regression test for issue #19875. + """ + rng = np.random.RandomState(0) + Xmm = create_memmap_backed_data(rng.randn(50, 100)) + AgglomerativeClustering(metric="euclidean", linkage="single").fit(Xmm) + + +def test_ward_agglomeration(global_random_seed): + # Check that we obtain the correct solution in a simplistic case + rng = np.random.RandomState(global_random_seed) + mask = np.ones([10, 10], dtype=bool) + X = rng.randn(50, 100) + connectivity = grid_to_graph(*mask.shape) + agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity) + agglo.fit(X) + assert np.size(np.unique(agglo.labels_)) == 5 + + X_red = agglo.transform(X) + assert X_red.shape[1] == 5 + X_full = agglo.inverse_transform(X_red) + assert np.unique(X_full[0]).size == 5 + assert_array_almost_equal(agglo.transform(X_full), X_red) + + # Check that fitting with no samples raises a ValueError + with pytest.raises(ValueError): + agglo.fit(X[:0]) + + +def test_single_linkage_clustering(): + # Check that we get the correct result in two emblematic cases + moons, moon_labels = make_moons(noise=0.05, random_state=42) + clustering = AgglomerativeClustering(n_clusters=2, linkage="single") + clustering.fit(moons) + assert_almost_equal( + normalized_mutual_info_score(clustering.labels_, moon_labels), 1 + ) + + circles, circle_labels = make_circles(factor=0.5, noise=0.025, random_state=42) + clustering = AgglomerativeClustering(n_clusters=2, linkage="single") + clustering.fit(circles) + assert_almost_equal( + normalized_mutual_info_score(clustering.labels_, circle_labels), 1 + ) + + +def assess_same_labelling(cut1, cut2): + """Util for comparison with scipy""" + co_clust = [] + for cut in [cut1, cut2]: + n = len(cut) + k = cut.max() + 1 + ecut = np.zeros((n, k)) + ecut[np.arange(n), cut] = 1 + co_clust.append(np.dot(ecut, ecut.T)) + assert (co_clust[0] == co_clust[1]).all() + + +def test_sparse_scikit_vs_scipy(global_random_seed): + # Test scikit linkage with full connectivity (i.e. 
unstructured) vs scipy + n, p, k = 10, 5, 3 + rng = np.random.RandomState(global_random_seed) + + # Not using a lil_matrix here, just to check that non sparse + # matrices are well handled + connectivity = np.ones((n, n)) + for linkage in _TREE_BUILDERS.keys(): + for i in range(5): + X = 0.1 * rng.normal(size=(n, p)) + X -= 4.0 * np.arange(n)[:, np.newaxis] + X -= X.mean(axis=1)[:, np.newaxis] + + out = hierarchy.linkage(X, method=linkage) + + children_ = out[:, :2].astype(int, copy=False) + children, _, n_leaves, _ = _TREE_BUILDERS[linkage]( + X, connectivity=connectivity + ) + + # Sort the order of child nodes per row for consistency + children.sort(axis=1) + assert_array_equal( + children, + children_, + "linkage tree differs from scipy impl for linkage: " + linkage, + ) + + cut = _hc_cut(k, children, n_leaves) + cut_ = _hc_cut(k, children_, n_leaves) + assess_same_labelling(cut, cut_) + + # Test error management in _hc_cut + with pytest.raises(ValueError): + _hc_cut(n_leaves + 1, children, n_leaves) + + +# Make sure our custom mst_linkage_core gives +# the same results as scipy's builtin +def test_vector_scikit_single_vs_scipy_single(global_random_seed): + n_samples, n_features, n_clusters = 10, 5, 3 + rng = np.random.RandomState(global_random_seed) + X = 0.1 * rng.normal(size=(n_samples, n_features)) + X -= 4.0 * np.arange(n_samples)[:, np.newaxis] + X -= X.mean(axis=1)[:, np.newaxis] + + out = hierarchy.linkage(X, method="single") + children_scipy = out[:, :2].astype(int) + + children, _, n_leaves, _ = _TREE_BUILDERS["single"](X) + + # Sort the order of child nodes per row for consistency + children.sort(axis=1) + assert_array_equal( + children, + children_scipy, + "linkage tree differs from scipy impl for single linkage.", + ) + + cut = _hc_cut(n_clusters, children, n_leaves) + cut_scipy = _hc_cut(n_clusters, children_scipy, n_leaves) + assess_same_labelling(cut, cut_scipy) + + +@pytest.mark.parametrize("metric_param_grid", METRICS_DEFAULT_PARAMS) +def test_mst_linkage_core_memory_mapped(metric_param_grid): + """The MST-LINKAGE-CORE algorithm must work on mem-mapped dataset. + + Non-regression test for issue #19875. + """ + rng = np.random.RandomState(seed=1) + X = rng.normal(size=(20, 4)) + Xmm = create_memmap_backed_data(X) + metric, param_grid = metric_param_grid + keys = param_grid.keys() + for vals in itertools.product(*param_grid.values()): + kwargs = dict(zip(keys, vals)) + distance_metric = DistanceMetric.get_metric(metric, **kwargs) + mst = mst_linkage_core(X, distance_metric) + mst_mm = mst_linkage_core(Xmm, distance_metric) + np.testing.assert_equal(mst, mst_mm) + + +def test_identical_points(): + # Ensure identical points are handled correctly when using mst with + # a sparse connectivity matrix + X = np.array([[0, 0, 0], [0, 0, 0], [1, 1, 1], [1, 1, 1], [2, 2, 2], [2, 2, 2]]) + true_labels = np.array([0, 0, 1, 1, 2, 2]) + connectivity = kneighbors_graph(X, n_neighbors=3, include_self=False) + connectivity = 0.5 * (connectivity + connectivity.T) + connectivity, n_components = _fix_connectivity(X, connectivity, "euclidean") + + for linkage in ("single", "average", "average", "ward"): + clustering = AgglomerativeClustering( + n_clusters=3, linkage=linkage, connectivity=connectivity + ) + clustering.fit(X) + + assert_almost_equal( + normalized_mutual_info_score(clustering.labels_, true_labels), 1 + ) + + +def test_connectivity_propagation(): + # Check that connectivity in the ward tree is propagated correctly during + # merging. 
+ X = np.array( + [ + (0.014, 0.120), + (0.014, 0.099), + (0.014, 0.097), + (0.017, 0.153), + (0.017, 0.153), + (0.018, 0.153), + (0.018, 0.153), + (0.018, 0.153), + (0.018, 0.153), + (0.018, 0.153), + (0.018, 0.153), + (0.018, 0.153), + (0.018, 0.152), + (0.018, 0.149), + (0.018, 0.144), + ] + ) + connectivity = kneighbors_graph(X, 10, include_self=False) + ward = AgglomerativeClustering( + n_clusters=4, connectivity=connectivity, linkage="ward" + ) + # If changes are not propagated correctly, fit crashes with an + # IndexError + ward.fit(X) + + +def test_ward_tree_children_order(global_random_seed): + # Check that children are ordered in the same way for both structured and + # unstructured versions of ward_tree. + + # test on five random datasets + n, p = 10, 5 + rng = np.random.RandomState(global_random_seed) + + connectivity = np.ones((n, n)) + for i in range(5): + X = 0.1 * rng.normal(size=(n, p)) + X -= 4.0 * np.arange(n)[:, np.newaxis] + X -= X.mean(axis=1)[:, np.newaxis] + + out_unstructured = ward_tree(X) + out_structured = ward_tree(X, connectivity=connectivity) + + assert_array_equal(out_unstructured[0], out_structured[0]) + + +def test_ward_linkage_tree_return_distance(global_random_seed): + # Test return_distance option on linkage and ward trees + + # test that return_distance when set true, gives same + # output on both structured and unstructured clustering. + n, p = 10, 5 + rng = np.random.RandomState(global_random_seed) + + connectivity = np.ones((n, n)) + for i in range(5): + X = 0.1 * rng.normal(size=(n, p)) + X -= 4.0 * np.arange(n)[:, np.newaxis] + X -= X.mean(axis=1)[:, np.newaxis] + + out_unstructured = ward_tree(X, return_distance=True) + out_structured = ward_tree(X, connectivity=connectivity, return_distance=True) + + # get children + children_unstructured = out_unstructured[0] + children_structured = out_structured[0] + + # check if we got the same clusters + assert_array_equal(children_unstructured, children_structured) + + # check if the distances are the same + dist_unstructured = out_unstructured[-1] + dist_structured = out_structured[-1] + + assert_array_almost_equal(dist_unstructured, dist_structured) + + for linkage in ["average", "complete", "single"]: + structured_items = linkage_tree( + X, connectivity=connectivity, linkage=linkage, return_distance=True + )[-1] + unstructured_items = linkage_tree(X, linkage=linkage, return_distance=True)[ + -1 + ] + structured_dist = structured_items[-1] + unstructured_dist = unstructured_items[-1] + structured_children = structured_items[0] + unstructured_children = unstructured_items[0] + assert_array_almost_equal(structured_dist, unstructured_dist) + assert_array_almost_equal(structured_children, unstructured_children) + + # test on the following dataset where we know the truth + # taken from scipy/cluster/tests/hierarchy_test_data.py + X = np.array( + [ + [1.43054825, -7.5693489], + [6.95887839, 6.82293382], + [2.87137846, -9.68248579], + [7.87974764, -6.05485803], + [8.24018364, -6.09495602], + [7.39020262, 8.54004355], + ] + ) + # truth + linkage_X_ward = np.array( + [ + [3.0, 4.0, 0.36265956, 2.0], + [1.0, 5.0, 1.77045373, 2.0], + [0.0, 2.0, 2.55760419, 2.0], + [6.0, 8.0, 9.10208346, 4.0], + [7.0, 9.0, 24.7784379, 6.0], + ] + ) + + linkage_X_complete = np.array( + [ + [3.0, 4.0, 0.36265956, 2.0], + [1.0, 5.0, 1.77045373, 2.0], + [0.0, 2.0, 2.55760419, 2.0], + [6.0, 8.0, 6.96742194, 4.0], + [7.0, 9.0, 18.77445997, 6.0], + ] + ) + + linkage_X_average = np.array( + [ + [3.0, 4.0, 0.36265956, 2.0], + [1.0, 5.0, 
1.77045373, 2.0], + [0.0, 2.0, 2.55760419, 2.0], + [6.0, 8.0, 6.55832839, 4.0], + [7.0, 9.0, 15.44089605, 6.0], + ] + ) + + n_samples, n_features = np.shape(X) + connectivity_X = np.ones((n_samples, n_samples)) + + out_X_unstructured = ward_tree(X, return_distance=True) + out_X_structured = ward_tree(X, connectivity=connectivity_X, return_distance=True) + + # check that the labels are the same + assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0]) + assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0]) + + # check that the distances are correct + assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4]) + assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4]) + + linkage_options = ["complete", "average", "single"] + X_linkage_truth = [linkage_X_complete, linkage_X_average] + for linkage, X_truth in zip(linkage_options, X_linkage_truth): + out_X_unstructured = linkage_tree(X, return_distance=True, linkage=linkage) + out_X_structured = linkage_tree( + X, connectivity=connectivity_X, linkage=linkage, return_distance=True + ) + + # check that the labels are the same + assert_array_equal(X_truth[:, :2], out_X_unstructured[0]) + assert_array_equal(X_truth[:, :2], out_X_structured[0]) + + # check that the distances are correct + assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4]) + assert_array_almost_equal(X_truth[:, 2], out_X_structured[4]) + + +def test_connectivity_fixing_non_lil(): + # Check non regression of a bug if a non item assignable connectivity is + # provided with more than one component. + # create dummy data + x = np.array([[0, 0], [1, 1]]) + # create a mask with several components to force connectivity fixing + m = np.array([[True, False], [False, True]]) + c = grid_to_graph(n_x=2, n_y=2, mask=m) + w = AgglomerativeClustering(connectivity=c, linkage="ward") + with pytest.warns(UserWarning): + w.fit(x) + + +def test_int_float_dict(): + rng = np.random.RandomState(0) + keys = np.unique(rng.randint(100, size=10).astype(np.intp, copy=False)) + values = rng.rand(len(keys)) + + d = IntFloatDict(keys, values) + for key, value in zip(keys, values): + assert d[key] == value + + other_keys = np.arange(50, dtype=np.intp)[::2] + other_values = np.full(50, 0.5)[::2] + other = IntFloatDict(other_keys, other_values) + # Complete smoke test + max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1) + average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1) + + +def test_connectivity_callable(): + rng = np.random.RandomState(0) + X = rng.rand(20, 5) + connectivity = kneighbors_graph(X, 3, include_self=False) + aglc1 = AgglomerativeClustering(connectivity=connectivity) + aglc2 = AgglomerativeClustering( + connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False) + ) + aglc1.fit(X) + aglc2.fit(X) + assert_array_equal(aglc1.labels_, aglc2.labels_) + + +def test_connectivity_ignores_diagonal(): + rng = np.random.RandomState(0) + X = rng.rand(20, 5) + connectivity = kneighbors_graph(X, 3, include_self=False) + connectivity_include_self = kneighbors_graph(X, 3, include_self=True) + aglc1 = AgglomerativeClustering(connectivity=connectivity) + aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self) + aglc1.fit(X) + aglc2.fit(X) + assert_array_equal(aglc1.labels_, aglc2.labels_) + + +def test_compute_full_tree(): + # Test that the full tree is computed if n_clusters is small + rng = np.random.RandomState(0) + X = rng.randn(10, 2) + connectivity = kneighbors_graph(X, 5, include_self=False) 
+ + # When n_clusters is less, the full tree should be built + # that is the number of merges should be n_samples - 1 + agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity) + agc.fit(X) + n_samples = X.shape[0] + n_nodes = agc.children_.shape[0] + assert n_nodes == n_samples - 1 + + # When n_clusters is large, greater than max of 100 and 0.02 * n_samples. + # we should stop when there are n_clusters. + n_clusters = 101 + X = rng.randn(200, 2) + connectivity = kneighbors_graph(X, 10, include_self=False) + agc = AgglomerativeClustering(n_clusters=n_clusters, connectivity=connectivity) + agc.fit(X) + n_samples = X.shape[0] + n_nodes = agc.children_.shape[0] + assert n_nodes == n_samples - n_clusters + + +def test_n_components(): + # Test n_components returned by linkage, average and ward tree + rng = np.random.RandomState(0) + X = rng.rand(5, 5) + + # Connectivity matrix having five components. + connectivity = np.eye(5) + + for linkage_func in _TREE_BUILDERS.values(): + assert ignore_warnings(linkage_func)(X, connectivity=connectivity)[1] == 5 + + +def test_affinity_passed_to_fix_connectivity(): + # Test that the affinity parameter is actually passed to the pairwise + # function + + size = 2 + rng = np.random.RandomState(0) + X = rng.randn(size, size) + mask = np.array([True, False, False, True]) + + connectivity = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray) + + class FakeAffinity: + def __init__(self): + self.counter = 0 + + def increment(self, *args, **kwargs): + self.counter += 1 + return self.counter + + fa = FakeAffinity() + + linkage_tree(X, connectivity=connectivity, affinity=fa.increment) + + assert fa.counter == 3 + + +@pytest.mark.parametrize("linkage", ["ward", "complete", "average"]) +def test_agglomerative_clustering_with_distance_threshold(linkage, global_random_seed): + # Check that we obtain the correct number of clusters with + # agglomerative clustering with distance_threshold. 
+ rng = np.random.RandomState(global_random_seed) + mask = np.ones([10, 10], dtype=bool) + n_samples = 100 + X = rng.randn(n_samples, 50) + connectivity = grid_to_graph(*mask.shape) + # test when distance threshold is set to 10 + distance_threshold = 10 + for conn in [None, connectivity]: + clustering = AgglomerativeClustering( + n_clusters=None, + distance_threshold=distance_threshold, + connectivity=conn, + linkage=linkage, + ) + clustering.fit(X) + clusters_produced = clustering.labels_ + num_clusters_produced = len(np.unique(clustering.labels_)) + # test if the clusters produced match the point in the linkage tree + # where the distance exceeds the threshold + tree_builder = _TREE_BUILDERS[linkage] + children, n_components, n_leaves, parent, distances = tree_builder( + X, connectivity=conn, n_clusters=None, return_distance=True + ) + num_clusters_at_threshold = ( + np.count_nonzero(distances >= distance_threshold) + 1 + ) + # test number of clusters produced + assert num_clusters_at_threshold == num_clusters_produced + # test clusters produced + clusters_at_threshold = _hc_cut( + n_clusters=num_clusters_produced, children=children, n_leaves=n_leaves + ) + assert np.array_equiv(clusters_produced, clusters_at_threshold) + + +def test_small_distance_threshold(global_random_seed): + rng = np.random.RandomState(global_random_seed) + n_samples = 10 + X = rng.randint(-300, 300, size=(n_samples, 3)) + # this should result in all data in their own clusters, given that + # their pairwise distances are bigger than .1 (which may not be the case + # with a different random seed). + clustering = AgglomerativeClustering( + n_clusters=None, distance_threshold=1.0, linkage="single" + ).fit(X) + # check that the pairwise distances are indeed all larger than .1 + all_distances = pairwise_distances(X, metric="minkowski", p=2) + np.fill_diagonal(all_distances, np.inf) + assert np.all(all_distances > 0.1) + assert clustering.n_clusters_ == n_samples + + +def test_cluster_distances_with_distance_threshold(global_random_seed): + rng = np.random.RandomState(global_random_seed) + n_samples = 100 + X = rng.randint(-10, 10, size=(n_samples, 3)) + # check the distances within the clusters and with other clusters + distance_threshold = 4 + clustering = AgglomerativeClustering( + n_clusters=None, distance_threshold=distance_threshold, linkage="single" + ).fit(X) + labels = clustering.labels_ + D = pairwise_distances(X, metric="minkowski", p=2) + # to avoid taking the 0 diagonal in min() + np.fill_diagonal(D, np.inf) + for label in np.unique(labels): + in_cluster_mask = labels == label + max_in_cluster_distance = ( + D[in_cluster_mask][:, in_cluster_mask].min(axis=0).max() + ) + min_out_cluster_distance = ( + D[in_cluster_mask][:, ~in_cluster_mask].min(axis=0).min() + ) + # single data point clusters only have that inf diagonal here + if in_cluster_mask.sum() > 1: + assert max_in_cluster_distance < distance_threshold + assert min_out_cluster_distance >= distance_threshold + + +@pytest.mark.parametrize("linkage", ["ward", "complete", "average"]) +@pytest.mark.parametrize( + ("threshold", "y_true"), [(0.5, [1, 0]), (1.0, [1, 0]), (1.5, [0, 0])] +) +def test_agglomerative_clustering_with_distance_threshold_edge_case( + linkage, threshold, y_true +): + # test boundary case of distance_threshold matching the distance + X = [[0], [1]] + clusterer = AgglomerativeClustering( + n_clusters=None, distance_threshold=threshold, linkage=linkage + ) + y_pred = clusterer.fit_predict(X) + assert adjusted_rand_score(y_true, y_pred) == 
1 + + +def test_dist_threshold_invalid_parameters(): + X = [[0], [1]] + with pytest.raises(ValueError, match="Exactly one of "): + AgglomerativeClustering(n_clusters=None, distance_threshold=None).fit(X) + + with pytest.raises(ValueError, match="Exactly one of "): + AgglomerativeClustering(n_clusters=2, distance_threshold=1).fit(X) + + X = [[0], [1]] + with pytest.raises(ValueError, match="compute_full_tree must be True if"): + AgglomerativeClustering( + n_clusters=None, distance_threshold=1, compute_full_tree=False + ).fit(X) + + +def test_invalid_shape_precomputed_dist_matrix(): + # Check that an error is raised when affinity='precomputed' + # and a non square matrix is passed (PR #16257). + rng = np.random.RandomState(0) + X = rng.rand(5, 3) + with pytest.raises( + ValueError, + match=r"Distance matrix should be square, got matrix of shape \(5, 3\)", + ): + AgglomerativeClustering(metric="precomputed", linkage="complete").fit(X) + + +def test_precomputed_connectivity_metric_with_2_connected_components(): + """Check that connecting components works when connectivity and + affinity are both precomputed and the number of connected components is + greater than 1. Non-regression test for #16151. + """ + + connectivity_matrix = np.array( + [ + [0, 1, 1, 0, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 1], + [0, 0, 0, 0, 0], + ] + ) + # ensure that connectivity_matrix has two connected components + assert connected_components(connectivity_matrix)[0] == 2 + + rng = np.random.RandomState(0) + X = rng.randn(5, 10) + + X_dist = pairwise_distances(X) + clusterer_precomputed = AgglomerativeClustering( + metric="precomputed", connectivity=connectivity_matrix, linkage="complete" + ) + msg = "Completing it to avoid stopping the tree early" + with pytest.warns(UserWarning, match=msg): + clusterer_precomputed.fit(X_dist) + + clusterer = AgglomerativeClustering( + connectivity=connectivity_matrix, linkage="complete" + ) + with pytest.warns(UserWarning, match=msg): + clusterer.fit(X) + + assert_array_equal(clusterer.labels_, clusterer_precomputed.labels_) + assert_array_equal(clusterer.children_, clusterer_precomputed.children_) + + +# TODO(1.6): remove in 1.6 +@pytest.mark.parametrize( + "Agglomeration", [AgglomerativeClustering, FeatureAgglomeration] +) +def test_deprecation_warning_metric_None(Agglomeration): + X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]]) + warn_msg = "`metric=None` is deprecated in version 1.4 and will be removed" + with pytest.warns(FutureWarning, match=warn_msg): + Agglomeration(metric=None).fit(X) diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_k_means.py b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_k_means.py new file mode 100644 index 0000000000000000000000000000000000000000..4a112a30b29ed6edd5223b612d727c4784bce8e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_k_means.py @@ -0,0 +1,1372 @@ +"""Testing for K-means""" +import re +import sys +from io import StringIO + +import numpy as np +import pytest +from scipy import sparse as sp + +from sklearn.base import clone +from sklearn.cluster import KMeans, MiniBatchKMeans, k_means, kmeans_plusplus +from sklearn.cluster._k_means_common import ( + _euclidean_dense_dense_wrapper, + _euclidean_sparse_dense_wrapper, + _inertia_dense, + _inertia_sparse, + _is_same_clustering, + _relocate_empty_clusters_dense, + _relocate_empty_clusters_sparse, +) +from sklearn.cluster._kmeans import _labels_inertia, _mini_batch_step +from 
sklearn.datasets import make_blobs +from sklearn.exceptions import ConvergenceWarning +from sklearn.metrics import pairwise_distances, pairwise_distances_argmin +from sklearn.metrics.cluster import v_measure_score +from sklearn.metrics.pairwise import euclidean_distances +from sklearn.utils._testing import ( + assert_allclose, + assert_array_equal, + create_memmap_backed_data, +) +from sklearn.utils.extmath import row_norms +from sklearn.utils.fixes import CSR_CONTAINERS, threadpool_limits + +# non centered, sparse centers to check the +centers = np.array( + [ + [0.0, 5.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 4.0, 0.0, 0.0], + [1.0, 0.0, 0.0, 5.0, 1.0], + ] +) +n_samples = 100 +n_clusters, n_features = centers.shape +X, true_labels = make_blobs( + n_samples=n_samples, centers=centers, cluster_std=1.0, random_state=42 +) +X_as_any_csr = [container(X) for container in CSR_CONTAINERS] +data_containers = [np.array] + CSR_CONTAINERS +data_containers_ids = ( + ["dense", "sparse_matrix", "sparse_array"] + if len(X_as_any_csr) == 2 + else ["dense", "sparse_matrix"] +) + + +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +@pytest.mark.parametrize("algo", ["lloyd", "elkan"]) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_kmeans_results(array_constr, algo, dtype): + # Checks that KMeans works as intended on toy dataset by comparing with + # expected results computed by hand. + X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]], dtype=dtype) + sample_weight = [3, 1, 1, 3] + init_centers = np.array([[0, 0], [1, 1]], dtype=dtype) + + expected_labels = [0, 0, 1, 1] + expected_inertia = 0.375 + expected_centers = np.array([[0.125, 0], [0.875, 1]], dtype=dtype) + expected_n_iter = 2 + + kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo) + kmeans.fit(X, sample_weight=sample_weight) + + assert_array_equal(kmeans.labels_, expected_labels) + assert_allclose(kmeans.inertia_, expected_inertia) + assert_allclose(kmeans.cluster_centers_, expected_centers) + assert kmeans.n_iter_ == expected_n_iter + + +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +@pytest.mark.parametrize("algo", ["lloyd", "elkan"]) +def test_kmeans_relocated_clusters(array_constr, algo): + # check that empty clusters are relocated as expected + X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]]) + + # second center too far from others points will be empty at first iter + init_centers = np.array([[0.5, 0.5], [3, 3]]) + + kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo) + kmeans.fit(X) + + expected_n_iter = 3 + expected_inertia = 0.25 + assert_allclose(kmeans.inertia_, expected_inertia) + assert kmeans.n_iter_ == expected_n_iter + + # There are two acceptable ways of relocating clusters in this example, the output + # depends on how the argpartition strategy breaks ties. We accept both outputs. 
+ try: + expected_labels = [0, 0, 1, 1] + expected_centers = [[0.25, 0], [0.75, 1]] + assert_array_equal(kmeans.labels_, expected_labels) + assert_allclose(kmeans.cluster_centers_, expected_centers) + except AssertionError: + expected_labels = [1, 1, 0, 0] + expected_centers = [[0.75, 1.0], [0.25, 0.0]] + assert_array_equal(kmeans.labels_, expected_labels) + assert_allclose(kmeans.cluster_centers_, expected_centers) + + +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +def test_relocate_empty_clusters(array_constr): + # test for the _relocate_empty_clusters_(dense/sparse) helpers + + # Synthetic dataset with 3 obvious clusters of different sizes + X = np.array([-10.0, -9.5, -9, -8.5, -8, -1, 1, 9, 9.5, 10]).reshape(-1, 1) + X = array_constr(X) + sample_weight = np.ones(10) + + # centers all initialized to the first point of X + centers_old = np.array([-10.0, -10, -10]).reshape(-1, 1) + + # With this initialization, all points will be assigned to the first center + # At this point a center in centers_new is the weighted sum of the points + # it contains if it's not empty, otherwise it is the same as before. + centers_new = np.array([-16.5, -10, -10]).reshape(-1, 1) + weight_in_clusters = np.array([10.0, 0, 0]) + labels = np.zeros(10, dtype=np.int32) + + if array_constr is np.array: + _relocate_empty_clusters_dense( + X, sample_weight, centers_old, centers_new, weight_in_clusters, labels + ) + else: + _relocate_empty_clusters_sparse( + X.data, + X.indices, + X.indptr, + sample_weight, + centers_old, + centers_new, + weight_in_clusters, + labels, + ) + + # The relocation scheme will take the 2 points farthest from the center and + # assign them to the 2 empty clusters, i.e. points at 10 and at 9.9. The + # first center will be updated to contain the other 8 points. + assert_array_equal(weight_in_clusters, [8, 1, 1]) + assert_allclose(centers_new, [[-36], [10], [9.5]]) + + +@pytest.mark.parametrize("distribution", ["normal", "blobs"]) +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +@pytest.mark.parametrize("tol", [1e-2, 1e-8, 1e-100, 0]) +def test_kmeans_elkan_results(distribution, array_constr, tol, global_random_seed): + # Check that results are identical between lloyd and elkan algorithms + rnd = np.random.RandomState(global_random_seed) + if distribution == "normal": + X = rnd.normal(size=(5000, 10)) + else: + X, _ = make_blobs(random_state=rnd) + X[X < 0] = 0 + X = array_constr(X) + + km_lloyd = KMeans(n_clusters=5, random_state=global_random_seed, n_init=1, tol=tol) + km_elkan = KMeans( + algorithm="elkan", + n_clusters=5, + random_state=global_random_seed, + n_init=1, + tol=tol, + ) + + km_lloyd.fit(X) + km_elkan.fit(X) + assert_allclose(km_elkan.cluster_centers_, km_lloyd.cluster_centers_) + assert_array_equal(km_elkan.labels_, km_lloyd.labels_) + assert km_elkan.n_iter_ == km_lloyd.n_iter_ + assert km_elkan.inertia_ == pytest.approx(km_lloyd.inertia_, rel=1e-6) + + +@pytest.mark.parametrize("algorithm", ["lloyd", "elkan"]) +def test_kmeans_convergence(algorithm, global_random_seed): + # Check that KMeans stops when convergence is reached when tol=0. 
(#16075) + rnd = np.random.RandomState(global_random_seed) + X = rnd.normal(size=(5000, 10)) + max_iter = 300 + + km = KMeans( + algorithm=algorithm, + n_clusters=5, + random_state=global_random_seed, + n_init=1, + tol=0, + max_iter=max_iter, + ).fit(X) + + assert km.n_iter_ < max_iter + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_predict_sample_weight_deprecation_warning(Estimator): + X = np.random.rand(100, 2) + sample_weight = np.random.uniform(size=100) + kmeans = Estimator() + kmeans.fit(X, sample_weight=sample_weight) + warn_msg = ( + "'sample_weight' was deprecated in version 1.3 and will be removed in 1.5." + ) + with pytest.warns(FutureWarning, match=warn_msg): + kmeans.predict(X, sample_weight=sample_weight) + + +@pytest.mark.parametrize("X_csr", X_as_any_csr) +def test_minibatch_update_consistency(X_csr, global_random_seed): + # Check that dense and sparse minibatch update give the same results + rng = np.random.RandomState(global_random_seed) + + centers_old = centers + rng.normal(size=centers.shape) + centers_old_csr = centers_old.copy() + + centers_new = np.zeros_like(centers_old) + centers_new_csr = np.zeros_like(centers_old_csr) + + weight_sums = np.zeros(centers_old.shape[0], dtype=X.dtype) + weight_sums_csr = np.zeros(centers_old.shape[0], dtype=X.dtype) + + sample_weight = np.ones(X.shape[0], dtype=X.dtype) + + # extract a small minibatch + X_mb = X[:10] + X_mb_csr = X_csr[:10] + sample_weight_mb = sample_weight[:10] + + # step 1: compute the dense minibatch update + old_inertia = _mini_batch_step( + X_mb, + sample_weight_mb, + centers_old, + centers_new, + weight_sums, + np.random.RandomState(global_random_seed), + random_reassign=False, + ) + assert old_inertia > 0.0 + + # compute the new inertia on the same batch to check that it decreased + labels, new_inertia = _labels_inertia(X_mb, sample_weight_mb, centers_new) + assert new_inertia > 0.0 + assert new_inertia < old_inertia + + # step 2: compute the sparse minibatch update + old_inertia_csr = _mini_batch_step( + X_mb_csr, + sample_weight_mb, + centers_old_csr, + centers_new_csr, + weight_sums_csr, + np.random.RandomState(global_random_seed), + random_reassign=False, + ) + assert old_inertia_csr > 0.0 + + # compute the new inertia on the same batch to check that it decreased + labels_csr, new_inertia_csr = _labels_inertia( + X_mb_csr, sample_weight_mb, centers_new_csr + ) + assert new_inertia_csr > 0.0 + assert new_inertia_csr < old_inertia_csr + + # step 3: check that sparse and dense updates lead to the same results + assert_array_equal(labels, labels_csr) + assert_allclose(centers_new, centers_new_csr) + assert_allclose(old_inertia, old_inertia_csr) + assert_allclose(new_inertia, new_inertia_csr) + + +def _check_fitted_model(km): + # check that the number of clusters centers and distinct labels match + # the expectation + centers = km.cluster_centers_ + assert centers.shape == (n_clusters, n_features) + + labels = km.labels_ + assert np.unique(labels).shape[0] == n_clusters + + # check that the labels assignment are perfect (up to a permutation) + assert_allclose(v_measure_score(true_labels, labels), 1.0) + assert km.inertia_ > 0.0 + + +@pytest.mark.parametrize( + "input_data", + [X] + X_as_any_csr, + ids=data_containers_ids, +) +@pytest.mark.parametrize( + "init", + ["random", "k-means++", centers, lambda X, k, random_state: centers], + ids=["random", "k-means++", "ndarray", "callable"], +) +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def 
test_all_init(Estimator, input_data, init): + # Check KMeans and MiniBatchKMeans with all possible init. + n_init = 10 if isinstance(init, str) else 1 + km = Estimator( + init=init, n_clusters=n_clusters, random_state=42, n_init=n_init + ).fit(input_data) + _check_fitted_model(km) + + +@pytest.mark.parametrize( + "init", + ["random", "k-means++", centers, lambda X, k, random_state: centers], + ids=["random", "k-means++", "ndarray", "callable"], +) +def test_minibatch_kmeans_partial_fit_init(init): + # Check MiniBatchKMeans init with partial_fit + n_init = 10 if isinstance(init, str) else 1 + km = MiniBatchKMeans( + init=init, n_clusters=n_clusters, random_state=0, n_init=n_init + ) + for i in range(100): + # "random" init requires many batches to recover the true labels. + km.partial_fit(X) + _check_fitted_model(km) + + +@pytest.mark.parametrize( + "init, expected_n_init", + [ + ("k-means++", 1), + ("random", "default"), + ( + lambda X, n_clusters, random_state: random_state.uniform( + size=(n_clusters, X.shape[1]) + ), + "default", + ), + ("array-like", 1), + ], +) +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_kmeans_init_auto_with_initial_centroids(Estimator, init, expected_n_init): + """Check that `n_init="auto"` chooses the right number of initializations. + Non-regression test for #26657: + https://github.com/scikit-learn/scikit-learn/pull/26657 + """ + n_sample, n_features, n_clusters = 100, 10, 5 + X = np.random.randn(n_sample, n_features) + if init == "array-like": + init = np.random.randn(n_clusters, n_features) + if expected_n_init == "default": + expected_n_init = 3 if Estimator is MiniBatchKMeans else 10 + + kmeans = Estimator(n_clusters=n_clusters, init=init, n_init="auto").fit(X) + assert kmeans._n_init == expected_n_init + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_fortran_aligned_data(Estimator, global_random_seed): + # Check that KMeans works with fortran-aligned data. + X_fortran = np.asfortranarray(X) + centers_fortran = np.asfortranarray(centers) + + km_c = Estimator( + n_clusters=n_clusters, init=centers, n_init=1, random_state=global_random_seed + ).fit(X) + km_f = Estimator( + n_clusters=n_clusters, + init=centers_fortran, + n_init=1, + random_state=global_random_seed, + ).fit(X_fortran) + assert_allclose(km_c.cluster_centers_, km_f.cluster_centers_) + assert_array_equal(km_c.labels_, km_f.labels_) + + +def test_minibatch_kmeans_verbose(): + # Check verbose mode of MiniBatchKMeans for better coverage. + km = MiniBatchKMeans(n_clusters=n_clusters, random_state=42, verbose=1) + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + km.fit(X) + finally: + sys.stdout = old_stdout + + +@pytest.mark.parametrize("algorithm", ["lloyd", "elkan"]) +@pytest.mark.parametrize("tol", [1e-2, 0]) +def test_kmeans_verbose(algorithm, tol, capsys): + # Check verbose mode of KMeans for better coverage. 
+ X = np.random.RandomState(0).normal(size=(5000, 10)) + + KMeans( + algorithm=algorithm, + n_clusters=n_clusters, + random_state=42, + init="random", + n_init=1, + tol=tol, + verbose=1, + ).fit(X) + + captured = capsys.readouterr() + + assert re.search(r"Initialization complete", captured.out) + assert re.search(r"Iteration [0-9]+, inertia", captured.out) + + if tol == 0: + assert re.search(r"strict convergence", captured.out) + else: + assert re.search(r"center shift .* within tolerance", captured.out) + + +def test_minibatch_kmeans_warning_init_size(): + # Check that a warning is raised when init_size is smaller than n_clusters + with pytest.warns( + RuntimeWarning, match=r"init_size.* should be larger than n_clusters" + ): + MiniBatchKMeans(init_size=10, n_clusters=20).fit(X) + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_warning_n_init_precomputed_centers(Estimator): + # Check that a warning is raised when n_init > 1 and an array is passed for + # the init parameter. + with pytest.warns( + RuntimeWarning, + match="Explicit initial center position passed: performing only one init", + ): + Estimator(init=centers, n_clusters=n_clusters, n_init=10).fit(X) + + +def test_minibatch_sensible_reassign(global_random_seed): + # check that identical initial clusters are reassigned + # also a regression test for when there are more desired reassignments than + # samples. + zeroed_X, true_labels = make_blobs( + n_samples=100, centers=5, random_state=global_random_seed + ) + zeroed_X[::2, :] = 0 + + km = MiniBatchKMeans( + n_clusters=20, batch_size=10, random_state=global_random_seed, init="random" + ).fit(zeroed_X) + # there should not be too many exact zero cluster centers + assert km.cluster_centers_.any(axis=1).sum() > 10 + + # do the same with batch-size > X.shape[0] (regression test) + km = MiniBatchKMeans( + n_clusters=20, batch_size=200, random_state=global_random_seed, init="random" + ).fit(zeroed_X) + # there should not be too many exact zero cluster centers + assert km.cluster_centers_.any(axis=1).sum() > 10 + + # do the same with partial_fit API + km = MiniBatchKMeans(n_clusters=20, random_state=global_random_seed, init="random") + for i in range(100): + km.partial_fit(zeroed_X) + # there should not be too many exact zero cluster centers + assert km.cluster_centers_.any(axis=1).sum() > 10 + + +@pytest.mark.parametrize( + "input_data", + [X] + X_as_any_csr, + ids=data_containers_ids, +) +def test_minibatch_reassign(input_data, global_random_seed): + # Check the reassignment part of the minibatch step with very high or very + # low reassignment ratio. + perfect_centers = np.empty((n_clusters, n_features)) + for i in range(n_clusters): + perfect_centers[i] = X[true_labels == i].mean(axis=0) + + sample_weight = np.ones(n_samples) + centers_new = np.empty_like(perfect_centers) + + # Give a perfect initialization, but a large reassignment_ratio, as a + # result many centers should be reassigned and the model should no longer + # be good + score_before = -_labels_inertia(input_data, sample_weight, perfect_centers, 1)[1] + + _mini_batch_step( + input_data, + sample_weight, + perfect_centers, + centers_new, + np.zeros(n_clusters), + np.random.RandomState(global_random_seed), + random_reassign=True, + reassignment_ratio=1, + ) + + score_after = -_labels_inertia(input_data, sample_weight, centers_new, 1)[1] + + assert score_before > score_after + + # Give a perfect initialization, with a small reassignment_ratio, + # no center should be reassigned. 
+ _mini_batch_step( + input_data, + sample_weight, + perfect_centers, + centers_new, + np.zeros(n_clusters), + np.random.RandomState(global_random_seed), + random_reassign=True, + reassignment_ratio=1e-15, + ) + + assert_allclose(centers_new, perfect_centers) + + +def test_minibatch_with_many_reassignments(): + # Test for the case that the number of clusters to reassign is bigger + # than the batch_size. Run the test with 100 clusters and a batch_size of + # 10 because it turned out that these values ensure that the number of + # clusters to reassign is always bigger than the batch_size. + MiniBatchKMeans( + n_clusters=100, + batch_size=10, + init_size=n_samples, + random_state=42, + verbose=True, + ).fit(X) + + +def test_minibatch_kmeans_init_size(): + # Check the internal _init_size attribute of MiniBatchKMeans + + # default init size should be 3 * batch_size + km = MiniBatchKMeans(n_clusters=10, batch_size=5, n_init=1).fit(X) + assert km._init_size == 15 + + # if 3 * batch size < n_clusters, it should then be 3 * n_clusters + km = MiniBatchKMeans(n_clusters=10, batch_size=1, n_init=1).fit(X) + assert km._init_size == 30 + + # it should not be larger than n_samples + km = MiniBatchKMeans( + n_clusters=10, batch_size=5, n_init=1, init_size=n_samples + 1 + ).fit(X) + assert km._init_size == n_samples + + +@pytest.mark.parametrize("tol, max_no_improvement", [(1e-4, None), (0, 10)]) +def test_minibatch_declared_convergence(capsys, tol, max_no_improvement): + # Check convergence detection based on ewa batch inertia or on + # small center change. + X, _, centers = make_blobs(centers=3, random_state=0, return_centers=True) + + km = MiniBatchKMeans( + n_clusters=3, + init=centers, + batch_size=20, + tol=tol, + random_state=0, + max_iter=10, + n_init=1, + verbose=1, + max_no_improvement=max_no_improvement, + ) + + km.fit(X) + assert 1 < km.n_iter_ < 10 + + captured = capsys.readouterr() + if max_no_improvement is None: + assert "Converged (small centers change)" in captured.out + if tol == 0: + assert "Converged (lack of improvement in inertia)" in captured.out + + +def test_minibatch_iter_steps(): + # Check consistency of n_iter_ and n_steps_ attributes. + batch_size = 30 + n_samples = X.shape[0] + km = MiniBatchKMeans(n_clusters=3, batch_size=batch_size, random_state=0).fit(X) + + # n_iter_ is the number of started epochs + assert km.n_iter_ == np.ceil((km.n_steps_ * batch_size) / n_samples) + assert isinstance(km.n_iter_, int) + + # without stopping condition, max_iter should be reached + km = MiniBatchKMeans( + n_clusters=3, + batch_size=batch_size, + random_state=0, + tol=0, + max_no_improvement=None, + max_iter=10, + ).fit(X) + + assert km.n_iter_ == 10 + assert km.n_steps_ == (10 * n_samples) // batch_size + assert isinstance(km.n_steps_, int) + + +def test_kmeans_copyx(): + # Check that copy_x=False returns nearly equal X after de-centering. 
+ my_X = X.copy() + km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42) + km.fit(my_X) + _check_fitted_model(km) + + # check that my_X is de-centered + assert_allclose(my_X, X) + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_score_max_iter(Estimator, global_random_seed): + # Check that fitting KMeans or MiniBatchKMeans with more iterations gives + # better score + X = np.random.RandomState(global_random_seed).randn(100, 10) + + km1 = Estimator(n_init=1, random_state=global_random_seed, max_iter=1) + s1 = km1.fit(X).score(X) + km2 = Estimator(n_init=1, random_state=global_random_seed, max_iter=10) + s2 = km2.fit(X).score(X) + assert s2 > s1 + + +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +@pytest.mark.parametrize( + "Estimator, algorithm", + [(KMeans, "lloyd"), (KMeans, "elkan"), (MiniBatchKMeans, None)], +) +@pytest.mark.parametrize("max_iter", [2, 100]) +def test_kmeans_predict( + Estimator, algorithm, array_constr, max_iter, global_dtype, global_random_seed +): + # Check the predict method and the equivalence between fit.predict and + # fit_predict. + X, _ = make_blobs( + n_samples=200, n_features=10, centers=10, random_state=global_random_seed + ) + X = array_constr(X, dtype=global_dtype) + + km = Estimator( + n_clusters=10, + init="random", + n_init=10, + max_iter=max_iter, + random_state=global_random_seed, + ) + if algorithm is not None: + km.set_params(algorithm=algorithm) + km.fit(X) + labels = km.labels_ + + # re-predict labels for training set using predict + pred = km.predict(X) + assert_array_equal(pred, labels) + + # re-predict labels for training set using fit_predict + pred = km.fit_predict(X) + assert_array_equal(pred, labels) + + # predict centroid labels + pred = km.predict(km.cluster_centers_) + assert_array_equal(pred, np.arange(10)) + + +@pytest.mark.parametrize("X_csr", X_as_any_csr) +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_dense_sparse(Estimator, X_csr, global_random_seed): + # Check that the results are the same for dense and sparse input. + sample_weight = np.random.RandomState(global_random_seed).random_sample( + (n_samples,) + ) + km_dense = Estimator( + n_clusters=n_clusters, random_state=global_random_seed, n_init=1 + ) + km_dense.fit(X, sample_weight=sample_weight) + km_sparse = Estimator( + n_clusters=n_clusters, random_state=global_random_seed, n_init=1 + ) + km_sparse.fit(X_csr, sample_weight=sample_weight) + + assert_array_equal(km_dense.labels_, km_sparse.labels_) + assert_allclose(km_dense.cluster_centers_, km_sparse.cluster_centers_) + + +@pytest.mark.parametrize("X_csr", X_as_any_csr) +@pytest.mark.parametrize( + "init", ["random", "k-means++", centers], ids=["random", "k-means++", "ndarray"] +) +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_predict_dense_sparse(Estimator, init, X_csr): + # check that models trained on sparse input also works for dense input at + # predict time and vice versa. 
+ n_init = 10 if isinstance(init, str) else 1 + km = Estimator(n_clusters=n_clusters, init=init, n_init=n_init, random_state=0) + + km.fit(X_csr) + assert_array_equal(km.predict(X), km.labels_) + + km.fit(X) + assert_array_equal(km.predict(X_csr), km.labels_) + + +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +@pytest.mark.parametrize("dtype", [np.int32, np.int64]) +@pytest.mark.parametrize("init", ["k-means++", "ndarray"]) +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_integer_input(Estimator, array_constr, dtype, init, global_random_seed): + # Check that KMeans and MiniBatchKMeans work with integer input. + X_dense = np.array([[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]) + X = array_constr(X_dense, dtype=dtype) + + n_init = 1 if init == "ndarray" else 10 + init = X_dense[:2] if init == "ndarray" else init + + km = Estimator( + n_clusters=2, init=init, n_init=n_init, random_state=global_random_seed + ) + if Estimator is MiniBatchKMeans: + km.set_params(batch_size=2) + + km.fit(X) + + # Internally integer input should be converted to float64 + assert km.cluster_centers_.dtype == np.float64 + + expected_labels = [0, 1, 1, 0, 0, 1] + assert_allclose(v_measure_score(km.labels_, expected_labels), 1.0) + + # Same with partial_fit (#14314) + if Estimator is MiniBatchKMeans: + km = clone(km).partial_fit(X) + assert km.cluster_centers_.dtype == np.float64 + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_transform(Estimator, global_random_seed): + # Check the transform method + km = Estimator(n_clusters=n_clusters, random_state=global_random_seed).fit(X) + + # Transorfming cluster_centers_ should return the pairwise distances + # between centers + Xt = km.transform(km.cluster_centers_) + assert_allclose(Xt, pairwise_distances(km.cluster_centers_)) + # In particular, diagonal must be 0 + assert_array_equal(Xt.diagonal(), np.zeros(n_clusters)) + + # Transorfming X should return the pairwise distances between X and the + # centers + Xt = km.transform(X) + assert_allclose(Xt, pairwise_distances(X, km.cluster_centers_)) + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_fit_transform(Estimator, global_random_seed): + # Check equivalence between fit.transform and fit_transform + X1 = Estimator(random_state=global_random_seed, n_init=1).fit(X).transform(X) + X2 = Estimator(random_state=global_random_seed, n_init=1).fit_transform(X) + assert_allclose(X1, X2) + + +def test_n_init(global_random_seed): + # Check that increasing the number of init increases the quality + previous_inertia = np.inf + for n_init in [1, 5, 10]: + # set max_iter=1 to avoid finding the global minimum and get the same + # inertia each time + km = KMeans( + n_clusters=n_clusters, + init="random", + n_init=n_init, + random_state=global_random_seed, + max_iter=1, + ).fit(X) + assert km.inertia_ <= previous_inertia + + +def test_k_means_function(global_random_seed): + # test calling the k_means function directly + cluster_centers, labels, inertia = k_means( + X, n_clusters=n_clusters, sample_weight=None, random_state=global_random_seed + ) + + assert cluster_centers.shape == (n_clusters, n_features) + assert np.unique(labels).shape[0] == n_clusters + + # check that the labels assignment are perfect (up to a permutation) + assert_allclose(v_measure_score(true_labels, labels), 1.0) + assert inertia > 0.0 + + +@pytest.mark.parametrize( + "input_data", + [X] + X_as_any_csr, + ids=data_containers_ids, +) 
+@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_float_precision(Estimator, input_data, global_random_seed): + # Check that the results are the same for single and double precision. + km = Estimator(n_init=1, random_state=global_random_seed) + + inertia = {} + Xt = {} + centers = {} + labels = {} + + for dtype in [np.float64, np.float32]: + X = input_data.astype(dtype, copy=False) + km.fit(X) + + inertia[dtype] = km.inertia_ + Xt[dtype] = km.transform(X) + centers[dtype] = km.cluster_centers_ + labels[dtype] = km.labels_ + + # dtype of cluster centers has to be the dtype of the input data + assert km.cluster_centers_.dtype == dtype + + # same with partial_fit + if Estimator is MiniBatchKMeans: + km.partial_fit(X[0:3]) + assert km.cluster_centers_.dtype == dtype + + # compare arrays with low precision since the difference between 32 and + # 64 bit comes from an accumulation of rounding errors. + assert_allclose(inertia[np.float32], inertia[np.float64], rtol=1e-4) + assert_allclose(Xt[np.float32], Xt[np.float64], atol=Xt[np.float64].max() * 1e-4) + assert_allclose( + centers[np.float32], centers[np.float64], atol=centers[np.float64].max() * 1e-4 + ) + assert_array_equal(labels[np.float32], labels[np.float64]) + + +@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64]) +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_centers_not_mutated(Estimator, dtype): + # Check that KMeans and MiniBatchKMeans won't mutate the user provided + # init centers silently even if input data and init centers have the same + # type. + X_new_type = X.astype(dtype, copy=False) + centers_new_type = centers.astype(dtype, copy=False) + + km = Estimator(init=centers_new_type, n_clusters=n_clusters, n_init=1) + km.fit(X_new_type) + + assert not np.may_share_memory(km.cluster_centers_, centers_new_type) + + +@pytest.mark.parametrize( + "input_data", + [X] + X_as_any_csr, + ids=data_containers_ids, +) +def test_kmeans_init_fitted_centers(input_data): + # Check that starting fitting from a local optimum shouldn't change the + # solution + km1 = KMeans(n_clusters=n_clusters).fit(input_data) + km2 = KMeans(n_clusters=n_clusters, init=km1.cluster_centers_, n_init=1).fit( + input_data + ) + + assert_allclose(km1.cluster_centers_, km2.cluster_centers_) + + +def test_kmeans_warns_less_centers_than_unique_points(global_random_seed): + # Check KMeans when the number of found clusters is smaller than expected + X = np.asarray([[0, 0], [0, 1], [1, 0], [1, 0]]) # last point is duplicated + km = KMeans(n_clusters=4, random_state=global_random_seed) + + # KMeans should warn that fewer labels than cluster centers have been used + msg = ( + r"Number of distinct clusters \(3\) found smaller than " + r"n_clusters \(4\). Possibly due to duplicate points in X." + ) + with pytest.warns(ConvergenceWarning, match=msg): + km.fit(X) + # only three distinct points, so only three clusters + # can have points assigned to them + assert set(km.labels_) == set(range(3)) + + +def _sort_centers(centers): + return np.sort(centers, axis=0) + + +def test_weighted_vs_repeated(global_random_seed): + # Check that a sample weight of N should yield the same result as an N-fold + # repetition of the sample. Valid only if init is precomputed, otherwise + # rng produces different results. Not valid for MinibatchKMeans due to rng + # to extract minibatches. 
+ sample_weight = np.random.RandomState(global_random_seed).randint( + 1, 5, size=n_samples + ) + X_repeat = np.repeat(X, sample_weight, axis=0) + + km = KMeans( + init=centers, n_init=1, n_clusters=n_clusters, random_state=global_random_seed + ) + + km_weighted = clone(km).fit(X, sample_weight=sample_weight) + repeated_labels = np.repeat(km_weighted.labels_, sample_weight) + km_repeated = clone(km).fit(X_repeat) + + assert_array_equal(km_repeated.labels_, repeated_labels) + assert_allclose(km_weighted.inertia_, km_repeated.inertia_) + assert_allclose( + _sort_centers(km_weighted.cluster_centers_), + _sort_centers(km_repeated.cluster_centers_), + ) + + +@pytest.mark.parametrize( + "input_data", + [X] + X_as_any_csr, + ids=data_containers_ids, +) +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_unit_weights_vs_no_weights(Estimator, input_data, global_random_seed): + # Check that not passing sample weights should be equivalent to passing + # sample weights all equal to one. + sample_weight = np.ones(n_samples) + + km = Estimator(n_clusters=n_clusters, random_state=global_random_seed, n_init=1) + km_none = clone(km).fit(input_data, sample_weight=None) + km_ones = clone(km).fit(input_data, sample_weight=sample_weight) + + assert_array_equal(km_none.labels_, km_ones.labels_) + assert_allclose(km_none.cluster_centers_, km_ones.cluster_centers_) + + +@pytest.mark.parametrize( + "input_data", + [X] + X_as_any_csr, + ids=data_containers_ids, +) +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_scaled_weights(Estimator, input_data, global_random_seed): + # Check that scaling all sample weights by a common factor + # shouldn't change the result + sample_weight = np.random.RandomState(global_random_seed).uniform(size=n_samples) + + km = Estimator(n_clusters=n_clusters, random_state=global_random_seed, n_init=1) + km_orig = clone(km).fit(input_data, sample_weight=sample_weight) + km_scaled = clone(km).fit(input_data, sample_weight=0.5 * sample_weight) + + assert_array_equal(km_orig.labels_, km_scaled.labels_) + assert_allclose(km_orig.cluster_centers_, km_scaled.cluster_centers_) + + +def test_kmeans_elkan_iter_attribute(): + # Regression test on bad n_iter_ value. Previous bug n_iter_ was one off + # it's right value (#11340). + km = KMeans(algorithm="elkan", max_iter=1).fit(X) + assert km.n_iter_ == 1 + + +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +def test_kmeans_empty_cluster_relocated(array_constr): + # check that empty clusters are correctly relocated when using sample + # weights (#13486) + X = array_constr([[-1], [1]]) + sample_weight = [1.9, 0.1] + init = np.array([[-1], [10]]) + + km = KMeans(n_clusters=2, init=init, n_init=1) + km.fit(X, sample_weight=sample_weight) + + assert len(set(km.labels_)) == 2 + assert_allclose(km.cluster_centers_, [[-1], [1]]) + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_result_equal_in_diff_n_threads(Estimator, global_random_seed): + # Check that KMeans/MiniBatchKMeans give the same results in parallel mode + # than in sequential mode. 
+ rnd = np.random.RandomState(global_random_seed) + X = rnd.normal(size=(50, 10)) + + with threadpool_limits(limits=1, user_api="openmp"): + result_1 = ( + Estimator(n_clusters=n_clusters, random_state=global_random_seed) + .fit(X) + .labels_ + ) + with threadpool_limits(limits=2, user_api="openmp"): + result_2 = ( + Estimator(n_clusters=n_clusters, random_state=global_random_seed) + .fit(X) + .labels_ + ) + assert_array_equal(result_1, result_2) + + +def test_warning_elkan_1_cluster(): + # Check warning messages specific to KMeans + with pytest.warns( + RuntimeWarning, + match="algorithm='elkan' doesn't make sense for a single cluster", + ): + KMeans(n_clusters=1, algorithm="elkan").fit(X) + + +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +@pytest.mark.parametrize("algo", ["lloyd", "elkan"]) +def test_k_means_1_iteration(array_constr, algo, global_random_seed): + # check the results after a single iteration (E-step M-step E-step) by + # comparing against a pure python implementation. + X = np.random.RandomState(global_random_seed).uniform(size=(100, 5)) + init_centers = X[:5] + X = array_constr(X) + + def py_kmeans(X, init): + new_centers = init.copy() + labels = pairwise_distances_argmin(X, init) + for label in range(init.shape[0]): + new_centers[label] = X[labels == label].mean(axis=0) + labels = pairwise_distances_argmin(X, new_centers) + return labels, new_centers + + py_labels, py_centers = py_kmeans(X, init_centers) + + cy_kmeans = KMeans( + n_clusters=5, n_init=1, init=init_centers, algorithm=algo, max_iter=1 + ).fit(X) + cy_labels = cy_kmeans.labels_ + cy_centers = cy_kmeans.cluster_centers_ + + assert_array_equal(py_labels, cy_labels) + assert_allclose(py_centers, cy_centers) + + +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +@pytest.mark.parametrize("squared", [True, False]) +def test_euclidean_distance(dtype, squared, global_random_seed): + # Check that the _euclidean_(dense/sparse)_dense helpers produce correct + # results + rng = np.random.RandomState(global_random_seed) + a_sparse = sp.random( + 1, 100, density=0.5, format="csr", random_state=rng, dtype=dtype + ) + a_dense = a_sparse.toarray().reshape(-1) + b = rng.randn(100).astype(dtype, copy=False) + b_squared_norm = (b**2).sum() + + expected = ((a_dense - b) ** 2).sum() + expected = expected if squared else np.sqrt(expected) + + distance_dense_dense = _euclidean_dense_dense_wrapper(a_dense, b, squared) + distance_sparse_dense = _euclidean_sparse_dense_wrapper( + a_sparse.data, a_sparse.indices, b, b_squared_norm, squared + ) + + rtol = 1e-4 if dtype == np.float32 else 1e-7 + assert_allclose(distance_dense_dense, distance_sparse_dense, rtol=rtol) + assert_allclose(distance_dense_dense, expected, rtol=rtol) + assert_allclose(distance_sparse_dense, expected, rtol=rtol) + + +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_inertia(dtype, global_random_seed): + # Check that the _inertia_(dense/sparse) helpers produce correct results. 
+ rng = np.random.RandomState(global_random_seed) + X_sparse = sp.random( + 100, 10, density=0.5, format="csr", random_state=rng, dtype=dtype + ) + X_dense = X_sparse.toarray() + sample_weight = rng.randn(100).astype(dtype, copy=False) + centers = rng.randn(5, 10).astype(dtype, copy=False) + labels = rng.randint(5, size=100, dtype=np.int32) + + distances = ((X_dense - centers[labels]) ** 2).sum(axis=1) + expected = np.sum(distances * sample_weight) + + inertia_dense = _inertia_dense(X_dense, sample_weight, centers, labels, n_threads=1) + inertia_sparse = _inertia_sparse( + X_sparse, sample_weight, centers, labels, n_threads=1 + ) + + rtol = 1e-4 if dtype == np.float32 else 1e-6 + assert_allclose(inertia_dense, inertia_sparse, rtol=rtol) + assert_allclose(inertia_dense, expected, rtol=rtol) + assert_allclose(inertia_sparse, expected, rtol=rtol) + + # Check the single_label parameter. + label = 1 + mask = labels == label + distances = ((X_dense[mask] - centers[label]) ** 2).sum(axis=1) + expected = np.sum(distances * sample_weight[mask]) + + inertia_dense = _inertia_dense( + X_dense, sample_weight, centers, labels, n_threads=1, single_label=label + ) + inertia_sparse = _inertia_sparse( + X_sparse, sample_weight, centers, labels, n_threads=1, single_label=label + ) + + assert_allclose(inertia_dense, inertia_sparse, rtol=rtol) + assert_allclose(inertia_dense, expected, rtol=rtol) + assert_allclose(inertia_sparse, expected, rtol=rtol) + + +@pytest.mark.parametrize("Klass, default_n_init", [(KMeans, 10), (MiniBatchKMeans, 3)]) +def test_n_init_auto(Klass, default_n_init): + est = Klass(n_init="auto", init="k-means++") + est.fit(X) + assert est._n_init == 1 + + est = Klass(n_init="auto", init="random") + est.fit(X) + assert est._n_init == 10 if Klass.__name__ == "KMeans" else 3 + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +def test_sample_weight_unchanged(Estimator): + # Check that sample_weight is not modified in place by KMeans (#17204) + X = np.array([[1], [2], [4]]) + sample_weight = np.array([0.5, 0.2, 0.3]) + Estimator(n_clusters=2, random_state=0).fit(X, sample_weight=sample_weight) + + assert_array_equal(sample_weight, np.array([0.5, 0.2, 0.3])) + + +@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans]) +@pytest.mark.parametrize( + "param, match", + [ + ({"n_clusters": n_samples + 1}, r"n_samples.* should be >= n_clusters"), + ( + {"init": X[:2]}, + r"The shape of the initial centers .* does not match " + r"the number of clusters", + ), + ( + {"init": lambda X_, k, random_state: X_[:2]}, + r"The shape of the initial centers .* does not match " + r"the number of clusters", + ), + ( + {"init": X[:8, :2]}, + r"The shape of the initial centers .* does not match " + r"the number of features of the data", + ), + ( + {"init": lambda X_, k, random_state: X_[:8, :2]}, + r"The shape of the initial centers .* does not match " + r"the number of features of the data", + ), + ], +) +def test_wrong_params(Estimator, param, match): + # Check that error are raised with clear error message when wrong values + # are passed for the parameters + # Set n_init=1 by default to avoid warning with precomputed init + km = Estimator(n_init=1) + with pytest.raises(ValueError, match=match): + km.set_params(**param).fit(X) + + +@pytest.mark.parametrize( + "param, match", + [ + ( + {"x_squared_norms": X[:2]}, + r"The length of x_squared_norms .* should " + r"be equal to the length of n_samples", + ), + ], +) +def test_kmeans_plusplus_wrong_params(param, match): + with 
pytest.raises(ValueError, match=match): + kmeans_plusplus(X, n_clusters, **param) + + +@pytest.mark.parametrize( + "input_data", + [X] + X_as_any_csr, +) +@pytest.mark.parametrize("dtype", [np.float64, np.float32]) +def test_kmeans_plusplus_output(input_data, dtype, global_random_seed): + # Check for the correct number of seeds and all positive values + data = input_data.astype(dtype) + centers, indices = kmeans_plusplus( + data, n_clusters, random_state=global_random_seed + ) + + # Check there are the correct number of indices and that all indices are + # positive and within the number of samples + assert indices.shape[0] == n_clusters + assert (indices >= 0).all() + assert (indices <= data.shape[0]).all() + + # Check for the correct number of seeds and that they are bound by the data + assert centers.shape[0] == n_clusters + assert (centers.max(axis=0) <= data.max(axis=0)).all() + assert (centers.min(axis=0) >= data.min(axis=0)).all() + + # Check that indices correspond to reported centers + # Use X for comparison rather than data, test still works against centers + # calculated with sparse data. + assert_allclose(X[indices].astype(dtype), centers) + + +@pytest.mark.parametrize("x_squared_norms", [row_norms(X, squared=True), None]) +def test_kmeans_plusplus_norms(x_squared_norms): + # Check that defining x_squared_norms returns the same as default=None. + centers, indices = kmeans_plusplus(X, n_clusters, x_squared_norms=x_squared_norms) + + assert_allclose(X[indices], centers) + + +def test_kmeans_plusplus_dataorder(global_random_seed): + # Check that memory layout does not effect result + centers_c, _ = kmeans_plusplus(X, n_clusters, random_state=global_random_seed) + + X_fortran = np.asfortranarray(X) + + centers_fortran, _ = kmeans_plusplus( + X_fortran, n_clusters, random_state=global_random_seed + ) + + assert_allclose(centers_c, centers_fortran) + + +def test_is_same_clustering(): + # Sanity check for the _is_same_clustering utility function + labels1 = np.array([1, 0, 0, 1, 2, 0, 2, 1], dtype=np.int32) + assert _is_same_clustering(labels1, labels1, 3) + + # these other labels represent the same clustering since we can retrieve the first + # labels by simply renaming the labels: 0 -> 1, 1 -> 2, 2 -> 0. + labels2 = np.array([0, 2, 2, 0, 1, 2, 1, 0], dtype=np.int32) + assert _is_same_clustering(labels1, labels2, 3) + + # these other labels do not represent the same clustering since not all ones are + # mapped to a same value + labels3 = np.array([1, 0, 0, 2, 2, 0, 2, 1], dtype=np.int32) + assert not _is_same_clustering(labels1, labels3, 3) + + +@pytest.mark.parametrize( + "kwargs", ({"init": np.str_("k-means++")}, {"init": [[0, 0], [1, 1]], "n_init": 1}) +) +def test_kmeans_with_array_like_or_np_scalar_init(kwargs): + """Check that init works with numpy scalar strings. + + Non-regression test for #21964. 
+ """ + X = np.asarray([[0, 0], [0.5, 0], [0.5, 1], [1, 1]], dtype=np.float64) + + clustering = KMeans(n_clusters=2, **kwargs) + # Does not raise + clustering.fit(X) + + +@pytest.mark.parametrize( + "Klass, method", + [(KMeans, "fit"), (MiniBatchKMeans, "fit"), (MiniBatchKMeans, "partial_fit")], +) +def test_feature_names_out(Klass, method): + """Check `feature_names_out` for `KMeans` and `MiniBatchKMeans`.""" + class_name = Klass.__name__.lower() + kmeans = Klass() + getattr(kmeans, method)(X) + n_clusters = kmeans.cluster_centers_.shape[0] + + names_out = kmeans.get_feature_names_out() + assert_array_equal([f"{class_name}{i}" for i in range(n_clusters)], names_out) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None]) +def test_predict_does_not_change_cluster_centers(csr_container): + """Check that predict does not change cluster centers. + + Non-regression test for gh-24253. + """ + X, _ = make_blobs(n_samples=200, n_features=10, centers=10, random_state=0) + if csr_container is not None: + X = csr_container(X) + + kmeans = KMeans() + y_pred1 = kmeans.fit_predict(X) + # Make cluster_centers readonly + kmeans.cluster_centers_ = create_memmap_backed_data(kmeans.cluster_centers_) + kmeans.labels_ = create_memmap_backed_data(kmeans.labels_) + + y_pred2 = kmeans.predict(X) + assert_array_equal(y_pred1, y_pred2) + + +@pytest.mark.parametrize("init", ["k-means++", "random"]) +def test_sample_weight_init(init, global_random_seed): + """Check that sample weight is used during init. + + `_init_centroids` is shared across all classes inheriting from _BaseKMeans so + it's enough to check for KMeans. + """ + rng = np.random.RandomState(global_random_seed) + X, _ = make_blobs( + n_samples=200, n_features=10, centers=10, random_state=global_random_seed + ) + x_squared_norms = row_norms(X, squared=True) + + kmeans = KMeans() + clusters_weighted = kmeans._init_centroids( + X=X, + x_squared_norms=x_squared_norms, + init=init, + sample_weight=rng.uniform(size=X.shape[0]), + n_centroids=5, + random_state=np.random.RandomState(global_random_seed), + ) + clusters = kmeans._init_centroids( + X=X, + x_squared_norms=x_squared_norms, + init=init, + sample_weight=np.ones(X.shape[0]), + n_centroids=5, + random_state=np.random.RandomState(global_random_seed), + ) + with pytest.raises(AssertionError): + assert_allclose(clusters_weighted, clusters) + + +@pytest.mark.parametrize("init", ["k-means++", "random"]) +def test_sample_weight_zero(init, global_random_seed): + """Check that if sample weight is 0, this sample won't be chosen. + + `_init_centroids` is shared across all classes inheriting from _BaseKMeans so + it's enough to check for KMeans. + """ + rng = np.random.RandomState(global_random_seed) + X, _ = make_blobs( + n_samples=100, n_features=5, centers=5, random_state=global_random_seed + ) + sample_weight = rng.uniform(size=X.shape[0]) + sample_weight[::2] = 0 + x_squared_norms = row_norms(X, squared=True) + + kmeans = KMeans() + clusters_weighted = kmeans._init_centroids( + X=X, + x_squared_norms=x_squared_norms, + init=init, + sample_weight=sample_weight, + n_centroids=10, + random_state=np.random.RandomState(global_random_seed), + ) + # No center should be one of the 0 sample weight point + # (i.e. 
be at a distance=0 from it) + d = euclidean_distances(X[::2], clusters_weighted) + assert not np.any(np.isclose(d, 0)) + + +@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids) +@pytest.mark.parametrize("algorithm", ["lloyd", "elkan"]) +def test_relocating_with_duplicates(algorithm, array_constr): + """Check that kmeans stops when there are more centers than non-duplicate samples + + Non-regression test for issue: + https://github.com/scikit-learn/scikit-learn/issues/28055 + """ + X = np.array([[0, 0], [1, 1], [1, 1], [1, 0], [0, 1]]) + km = KMeans(n_clusters=5, init=X, algorithm=algorithm) + + msg = r"Number of distinct clusters \(4\) found smaller than n_clusters \(5\)" + with pytest.warns(ConvergenceWarning, match=msg): + km.fit(array_constr(X)) + + assert km.n_iter_ == 1 diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_mean_shift.py b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_mean_shift.py new file mode 100644 index 0000000000000000000000000000000000000000..265c72d0c4ce1d009f8298e70dea902f2aa5d212 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_mean_shift.py @@ -0,0 +1,206 @@ +""" +Testing for mean shift clustering methods + +""" + +import warnings + +import numpy as np +import pytest + +from sklearn.cluster import MeanShift, estimate_bandwidth, get_bin_seeds, mean_shift +from sklearn.datasets import make_blobs +from sklearn.metrics import v_measure_score +from sklearn.utils._testing import assert_allclose, assert_array_equal + +n_clusters = 3 +centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10 +X, _ = make_blobs( + n_samples=300, + n_features=2, + centers=centers, + cluster_std=0.4, + shuffle=True, + random_state=11, +) + + +def test_estimate_bandwidth(): + # Test estimate_bandwidth + bandwidth = estimate_bandwidth(X, n_samples=200) + assert 0.9 <= bandwidth <= 1.5 + + +def test_estimate_bandwidth_1sample(global_dtype): + # Test estimate_bandwidth when n_samples=1 and quantile<1, so that + # n_neighbors is set to 1. 
+ bandwidth = estimate_bandwidth( + X.astype(global_dtype, copy=False), n_samples=1, quantile=0.3 + ) + + assert bandwidth.dtype == X.dtype + assert bandwidth == pytest.approx(0.0, abs=1e-5) + + +@pytest.mark.parametrize( + "bandwidth, cluster_all, expected, first_cluster_label", + [(1.2, True, 3, 0), (1.2, False, 4, -1)], +) +def test_mean_shift( + global_dtype, bandwidth, cluster_all, expected, first_cluster_label +): + # Test MeanShift algorithm + X_with_global_dtype = X.astype(global_dtype, copy=False) + ms = MeanShift(bandwidth=bandwidth, cluster_all=cluster_all) + labels = ms.fit(X_with_global_dtype).labels_ + labels_unique = np.unique(labels) + n_clusters_ = len(labels_unique) + assert n_clusters_ == expected + assert labels_unique[0] == first_cluster_label + assert ms.cluster_centers_.dtype == global_dtype + + cluster_centers, labels_mean_shift = mean_shift( + X_with_global_dtype, cluster_all=cluster_all + ) + labels_mean_shift_unique = np.unique(labels_mean_shift) + n_clusters_mean_shift = len(labels_mean_shift_unique) + assert n_clusters_mean_shift == expected + assert labels_mean_shift_unique[0] == first_cluster_label + assert cluster_centers.dtype == global_dtype + + +def test_parallel(global_dtype): + centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10 + X, _ = make_blobs( + n_samples=50, + n_features=2, + centers=centers, + cluster_std=0.4, + shuffle=True, + random_state=11, + ) + + X = X.astype(global_dtype, copy=False) + + ms1 = MeanShift(n_jobs=2) + ms1.fit(X) + + ms2 = MeanShift() + ms2.fit(X) + + assert_allclose(ms1.cluster_centers_, ms2.cluster_centers_) + assert ms1.cluster_centers_.dtype == ms2.cluster_centers_.dtype + assert_array_equal(ms1.labels_, ms2.labels_) + + +def test_meanshift_predict(global_dtype): + # Test MeanShift.predict + ms = MeanShift(bandwidth=1.2) + X_with_global_dtype = X.astype(global_dtype, copy=False) + labels = ms.fit_predict(X_with_global_dtype) + labels2 = ms.predict(X_with_global_dtype) + assert_array_equal(labels, labels2) + + +def test_meanshift_all_orphans(): + # init away from the data, crash with a sensible warning + ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]]) + msg = "No point was within bandwidth=0.1" + with pytest.raises(ValueError, match=msg): + ms.fit( + X, + ) + + +def test_unfitted(): + # Non-regression: before fit, there should be not fitted attributes. 
+ ms = MeanShift() + assert not hasattr(ms, "cluster_centers_") + assert not hasattr(ms, "labels_") + + +def test_cluster_intensity_tie(global_dtype): + X = np.array([[1, 1], [2, 1], [1, 0], [4, 7], [3, 5], [3, 6]], dtype=global_dtype) + c1 = MeanShift(bandwidth=2).fit(X) + + X = np.array([[4, 7], [3, 5], [3, 6], [1, 1], [2, 1], [1, 0]], dtype=global_dtype) + c2 = MeanShift(bandwidth=2).fit(X) + assert_array_equal(c1.labels_, [1, 1, 1, 0, 0, 0]) + assert_array_equal(c2.labels_, [0, 0, 0, 1, 1, 1]) + + +def test_bin_seeds(global_dtype): + # Test the bin seeding technique which can be used in the mean shift + # algorithm + # Data is just 6 points in the plane + X = np.array( + [[1.0, 1.0], [1.4, 1.4], [1.8, 1.2], [2.0, 1.0], [2.1, 1.1], [0.0, 0.0]], + dtype=global_dtype, + ) + + # With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be + # found + ground_truth = {(1.0, 1.0), (2.0, 1.0), (0.0, 0.0)} + test_bins = get_bin_seeds(X, 1, 1) + test_result = set(tuple(p) for p in test_bins) + assert len(ground_truth.symmetric_difference(test_result)) == 0 + + # With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be + # found + ground_truth = {(1.0, 1.0), (2.0, 1.0)} + test_bins = get_bin_seeds(X, 1, 2) + test_result = set(tuple(p) for p in test_bins) + assert len(ground_truth.symmetric_difference(test_result)) == 0 + + # With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found + # we bail and use the whole data here. + with warnings.catch_warnings(record=True): + test_bins = get_bin_seeds(X, 0.01, 1) + assert_allclose(test_bins, X) + + # tight clusters around [0, 0] and [1, 1], only get two bins + X, _ = make_blobs( + n_samples=100, + n_features=2, + centers=[[0, 0], [1, 1]], + cluster_std=0.1, + random_state=0, + ) + X = X.astype(global_dtype, copy=False) + test_bins = get_bin_seeds(X, 1) + assert_array_equal(test_bins, [[0, 0], [1, 1]]) + + +@pytest.mark.parametrize("max_iter", [1, 100]) +def test_max_iter(max_iter): + clusters1, _ = mean_shift(X, max_iter=max_iter) + ms = MeanShift(max_iter=max_iter).fit(X) + clusters2 = ms.cluster_centers_ + + assert ms.n_iter_ <= ms.max_iter + assert len(clusters1) == len(clusters2) + + for c1, c2 in zip(clusters1, clusters2): + assert np.allclose(c1, c2) + + +def test_mean_shift_zero_bandwidth(global_dtype): + # Check that mean shift works when the estimated bandwidth is 0. + X = np.array([1, 1, 1, 2, 2, 2, 3, 3], dtype=global_dtype).reshape(-1, 1) + + # estimate_bandwidth with default args returns 0 on this dataset + bandwidth = estimate_bandwidth(X) + assert bandwidth == 0 + + # get_bin_seeds with a 0 bin_size should return the dataset itself + assert get_bin_seeds(X, bin_size=bandwidth) is X + + # MeanShift with binning and a 0 estimated bandwidth should be equivalent + # to no binning. 
+ ms_binning = MeanShift(bin_seeding=True, bandwidth=None).fit(X) + ms_nobinning = MeanShift(bin_seeding=False).fit(X) + expected_labels = np.array([0, 0, 0, 1, 1, 1, 2, 2]) + + assert v_measure_score(ms_binning.labels_, expected_labels) == pytest.approx(1) + assert v_measure_score(ms_nobinning.labels_, expected_labels) == pytest.approx(1) + assert_allclose(ms_binning.cluster_centers_, ms_nobinning.cluster_centers_) diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_optics.py b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_optics.py new file mode 100644 index 0000000000000000000000000000000000000000..d6e415e114ee1ce42aaee2b07c9500b8b38eaea9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_optics.py @@ -0,0 +1,837 @@ +# Authors: Shane Grigsby +# Adrin Jalali +# License: BSD 3 clause +import warnings + +import numpy as np +import pytest + +from sklearn.cluster import DBSCAN, OPTICS +from sklearn.cluster._optics import _extend_region, _extract_xi_labels +from sklearn.cluster.tests.common import generate_clustered_data +from sklearn.datasets import make_blobs +from sklearn.exceptions import DataConversionWarning, EfficiencyWarning +from sklearn.metrics.cluster import contingency_matrix +from sklearn.metrics.pairwise import pairwise_distances +from sklearn.utils import shuffle +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.fixes import CSR_CONTAINERS + +rng = np.random.RandomState(0) +n_points_per_cluster = 10 +C1 = [-5, -2] + 0.8 * rng.randn(n_points_per_cluster, 2) +C2 = [4, -1] + 0.1 * rng.randn(n_points_per_cluster, 2) +C3 = [1, -2] + 0.2 * rng.randn(n_points_per_cluster, 2) +C4 = [-2, 3] + 0.3 * rng.randn(n_points_per_cluster, 2) +C5 = [3, -2] + 1.6 * rng.randn(n_points_per_cluster, 2) +C6 = [5, 6] + 2 * rng.randn(n_points_per_cluster, 2) +X = np.vstack((C1, C2, C3, C4, C5, C6)) + + +@pytest.mark.parametrize( + ("r_plot", "end"), + [ + [[10, 8.9, 8.8, 8.7, 7, 10], 3], + [[10, 8.9, 8.8, 8.7, 8.6, 7, 10], 0], + [[10, 8.9, 8.8, 8.7, 7, 6, np.inf], 4], + [[10, 8.9, 8.8, 8.7, 7, 6, np.inf], 4], + ], +) +def test_extend_downward(r_plot, end): + r_plot = np.array(r_plot) + ratio = r_plot[:-1] / r_plot[1:] + steep_downward = ratio >= 1 / 0.9 + upward = ratio < 1 + + e = _extend_region(steep_downward, upward, 0, 2) + assert e == end + + +@pytest.mark.parametrize( + ("r_plot", "end"), + [ + [[1, 2, 2.1, 2.2, 4, 8, 8, np.inf], 6], + [[1, 2, 2.1, 2.2, 2.3, 4, 8, 8, np.inf], 0], + [[1, 2, 2.1, 2, np.inf], 0], + [[1, 2, 2.1, np.inf], 2], + ], +) +def test_extend_upward(r_plot, end): + r_plot = np.array(r_plot) + ratio = r_plot[:-1] / r_plot[1:] + steep_upward = ratio <= 0.9 + downward = ratio > 1 + + e = _extend_region(steep_upward, downward, 0, 2) + assert e == end + + +@pytest.mark.parametrize( + ("ordering", "clusters", "expected"), + [ + [[0, 1, 2, 3], [[0, 1], [2, 3]], [0, 0, 1, 1]], + [[0, 1, 2, 3], [[0, 1], [3, 3]], [0, 0, -1, 1]], + [[0, 1, 2, 3], [[0, 1], [3, 3], [0, 3]], [0, 0, -1, 1]], + [[3, 1, 2, 0], [[0, 1], [3, 3], [0, 3]], [1, 0, -1, 0]], + ], +) +def test_the_extract_xi_labels(ordering, clusters, expected): + labels = _extract_xi_labels(ordering, clusters) + + assert_array_equal(labels, expected) + + +def test_extract_xi(global_dtype): + # small and easy test (no clusters around other clusters) + # but with a clear noise data. 
+ rng = np.random.RandomState(0) + n_points_per_cluster = 5 + + C1 = [-5, -2] + 0.8 * rng.randn(n_points_per_cluster, 2) + C2 = [4, -1] + 0.1 * rng.randn(n_points_per_cluster, 2) + C3 = [1, -2] + 0.2 * rng.randn(n_points_per_cluster, 2) + C4 = [-2, 3] + 0.3 * rng.randn(n_points_per_cluster, 2) + C5 = [3, -2] + 0.6 * rng.randn(n_points_per_cluster, 2) + C6 = [5, 6] + 0.2 * rng.randn(n_points_per_cluster, 2) + + X = np.vstack((C1, C2, C3, C4, C5, np.array([[100, 100]]), C6)).astype( + global_dtype, copy=False + ) + expected_labels = np.r_[[2] * 5, [0] * 5, [1] * 5, [3] * 5, [1] * 5, -1, [4] * 5] + X, expected_labels = shuffle(X, expected_labels, random_state=rng) + + clust = OPTICS( + min_samples=3, min_cluster_size=2, max_eps=20, cluster_method="xi", xi=0.4 + ).fit(X) + assert_array_equal(clust.labels_, expected_labels) + + # check float min_samples and min_cluster_size + clust = OPTICS( + min_samples=0.1, min_cluster_size=0.08, max_eps=20, cluster_method="xi", xi=0.4 + ).fit(X) + assert_array_equal(clust.labels_, expected_labels) + + X = np.vstack((C1, C2, C3, C4, C5, np.array([[100, 100]] * 2), C6)).astype( + global_dtype, copy=False + ) + expected_labels = np.r_[ + [1] * 5, [3] * 5, [2] * 5, [0] * 5, [2] * 5, -1, -1, [4] * 5 + ] + X, expected_labels = shuffle(X, expected_labels, random_state=rng) + + clust = OPTICS( + min_samples=3, min_cluster_size=3, max_eps=20, cluster_method="xi", xi=0.3 + ).fit(X) + # this may fail if the predecessor correction is not at work! + assert_array_equal(clust.labels_, expected_labels) + + C1 = [[0, 0], [0, 0.1], [0, -0.1], [0.1, 0]] + C2 = [[10, 10], [10, 9], [10, 11], [9, 10]] + C3 = [[100, 100], [100, 90], [100, 110], [90, 100]] + X = np.vstack((C1, C2, C3)).astype(global_dtype, copy=False) + expected_labels = np.r_[[0] * 4, [1] * 4, [2] * 4] + X, expected_labels = shuffle(X, expected_labels, random_state=rng) + + clust = OPTICS( + min_samples=2, min_cluster_size=2, max_eps=np.inf, cluster_method="xi", xi=0.04 + ).fit(X) + assert_array_equal(clust.labels_, expected_labels) + + +def test_cluster_hierarchy_(global_dtype): + rng = np.random.RandomState(0) + n_points_per_cluster = 100 + C1 = [0, 0] + 2 * rng.randn(n_points_per_cluster, 2).astype( + global_dtype, copy=False + ) + C2 = [0, 0] + 50 * rng.randn(n_points_per_cluster, 2).astype( + global_dtype, copy=False + ) + X = np.vstack((C1, C2)) + X = shuffle(X, random_state=0) + + clusters = OPTICS(min_samples=20, xi=0.1).fit(X).cluster_hierarchy_ + assert clusters.shape == (2, 2) + diff = np.sum(clusters - np.array([[0, 99], [0, 199]])) + assert diff / len(X) < 0.05 + + +@pytest.mark.parametrize( + "csr_container, metric", + [(None, "minkowski")] + [(container, "euclidean") for container in CSR_CONTAINERS], +) +def test_correct_number_of_clusters(metric, csr_container): + # in 'auto' mode + + n_clusters = 3 + X = generate_clustered_data(n_clusters=n_clusters) + # Parameters chosen specifically for this task. 
+ # Compute OPTICS + clust = OPTICS(max_eps=5.0 * 6.0, min_samples=4, xi=0.1, metric=metric) + clust.fit(csr_container(X) if csr_container is not None else X) + # number of clusters, ignoring noise if present + n_clusters_1 = len(set(clust.labels_)) - int(-1 in clust.labels_) + assert n_clusters_1 == n_clusters + + # check attribute types and sizes + assert clust.labels_.shape == (len(X),) + assert clust.labels_.dtype.kind == "i" + + assert clust.reachability_.shape == (len(X),) + assert clust.reachability_.dtype.kind == "f" + + assert clust.core_distances_.shape == (len(X),) + assert clust.core_distances_.dtype.kind == "f" + + assert clust.ordering_.shape == (len(X),) + assert clust.ordering_.dtype.kind == "i" + assert set(clust.ordering_) == set(range(len(X))) + + +def test_minimum_number_of_sample_check(): + # test that we check a minimum number of samples + msg = "min_samples must be no greater than" + + # Compute OPTICS + X = [[1, 1]] + clust = OPTICS(max_eps=5.0 * 0.3, min_samples=10, min_cluster_size=1.0) + + # Run the fit + with pytest.raises(ValueError, match=msg): + clust.fit(X) + + +def test_bad_extract(): + # Test an extraction of eps too close to original eps + msg = "Specify an epsilon smaller than 0.15. Got 0.3." + centers = [[1, 1], [-1, -1], [1, -1]] + X, labels_true = make_blobs( + n_samples=750, centers=centers, cluster_std=0.4, random_state=0 + ) + + # Compute OPTICS + clust = OPTICS(max_eps=5.0 * 0.03, cluster_method="dbscan", eps=0.3, min_samples=10) + with pytest.raises(ValueError, match=msg): + clust.fit(X) + + +def test_bad_reachability(): + msg = "All reachability values are inf. Set a larger max_eps." + centers = [[1, 1], [-1, -1], [1, -1]] + X, labels_true = make_blobs( + n_samples=750, centers=centers, cluster_std=0.4, random_state=0 + ) + + with pytest.warns(UserWarning, match=msg): + clust = OPTICS(max_eps=5.0 * 0.003, min_samples=10, eps=0.015) + clust.fit(X) + + +def test_nowarn_if_metric_bool_data_bool(): + # make sure no warning is raised if metric and data are both boolean + # non-regression test for + # https://github.com/scikit-learn/scikit-learn/issues/18996 + + pairwise_metric = "rogerstanimoto" + X = np.random.randint(2, size=(5, 2), dtype=bool) + + with warnings.catch_warnings(): + warnings.simplefilter("error", DataConversionWarning) + + OPTICS(metric=pairwise_metric).fit(X) + + +def test_warn_if_metric_bool_data_no_bool(): + # make sure a *single* conversion warning is raised if metric is boolean + # but data isn't + # non-regression test for + # https://github.com/scikit-learn/scikit-learn/issues/18996 + + pairwise_metric = "rogerstanimoto" + X = np.random.randint(2, size=(5, 2), dtype=np.int32) + msg = f"Data will be converted to boolean for metric {pairwise_metric}" + + with pytest.warns(DataConversionWarning, match=msg) as warn_record: + OPTICS(metric=pairwise_metric).fit(X) + assert len(warn_record) == 1 + + +def test_nowarn_if_metric_no_bool(): + # make sure no conversion warning is raised if + # metric isn't boolean, no matter what the data type is + pairwise_metric = "minkowski" + X_bool = np.random.randint(2, size=(5, 2), dtype=bool) + X_num = np.random.randint(2, size=(5, 2), dtype=np.int32) + + with warnings.catch_warnings(): + warnings.simplefilter("error", DataConversionWarning) + + # fit boolean data + OPTICS(metric=pairwise_metric).fit(X_bool) + # fit numeric data + OPTICS(metric=pairwise_metric).fit(X_num) + + +def test_close_extract(): + # Test extract where extraction eps is close to scaled max_eps + + centers = [[1, 1], [-1, 
-1], [1, -1]] + X, labels_true = make_blobs( + n_samples=750, centers=centers, cluster_std=0.4, random_state=0 + ) + + # Compute OPTICS + clust = OPTICS(max_eps=1.0, cluster_method="dbscan", eps=0.3, min_samples=10).fit(X) + # Cluster ordering starts at 0; max cluster label = 2 is 3 clusters + assert max(clust.labels_) == 2 + + +@pytest.mark.parametrize("eps", [0.1, 0.3, 0.5]) +@pytest.mark.parametrize("min_samples", [3, 10, 20]) +@pytest.mark.parametrize( + "csr_container, metric", + [(None, "minkowski"), (None, "euclidean")] + + [(container, "euclidean") for container in CSR_CONTAINERS], +) +def test_dbscan_optics_parity(eps, min_samples, metric, global_dtype, csr_container): + # Test that OPTICS clustering labels are <= 5% difference of DBSCAN + + centers = [[1, 1], [-1, -1], [1, -1]] + X, labels_true = make_blobs( + n_samples=150, centers=centers, cluster_std=0.4, random_state=0 + ) + X = csr_container(X) if csr_container is not None else X + + X = X.astype(global_dtype, copy=False) + + # calculate optics with dbscan extract at 0.3 epsilon + op = OPTICS( + min_samples=min_samples, cluster_method="dbscan", eps=eps, metric=metric + ).fit(X) + + # calculate dbscan labels + db = DBSCAN(eps=eps, min_samples=min_samples).fit(X) + + contingency = contingency_matrix(db.labels_, op.labels_) + agree = min( + np.sum(np.max(contingency, axis=0)), np.sum(np.max(contingency, axis=1)) + ) + disagree = X.shape[0] - agree + + percent_mismatch = np.round((disagree - 1) / X.shape[0], 2) + + # verify label mismatch is <= 5% labels + assert percent_mismatch <= 0.05 + + +def test_min_samples_edge_case(global_dtype): + C1 = [[0, 0], [0, 0.1], [0, -0.1]] + C2 = [[10, 10], [10, 9], [10, 11]] + C3 = [[100, 100], [100, 96], [100, 106]] + X = np.vstack((C1, C2, C3)).astype(global_dtype, copy=False) + + expected_labels = np.r_[[0] * 3, [1] * 3, [2] * 3] + clust = OPTICS(min_samples=3, max_eps=7, cluster_method="xi", xi=0.04).fit(X) + assert_array_equal(clust.labels_, expected_labels) + + expected_labels = np.r_[[0] * 3, [1] * 3, [-1] * 3] + clust = OPTICS(min_samples=3, max_eps=3, cluster_method="xi", xi=0.04).fit(X) + assert_array_equal(clust.labels_, expected_labels) + + expected_labels = np.r_[[-1] * 9] + with pytest.warns(UserWarning, match="All reachability values"): + clust = OPTICS(min_samples=4, max_eps=3, cluster_method="xi", xi=0.04).fit(X) + assert_array_equal(clust.labels_, expected_labels) + + +# try arbitrary minimum sizes +@pytest.mark.parametrize("min_cluster_size", range(2, X.shape[0] // 10, 23)) +def test_min_cluster_size(min_cluster_size, global_dtype): + redX = X[::2].astype(global_dtype, copy=False) # reduce for speed + clust = OPTICS(min_samples=9, min_cluster_size=min_cluster_size).fit(redX) + cluster_sizes = np.bincount(clust.labels_[clust.labels_ != -1]) + if cluster_sizes.size: + assert min(cluster_sizes) >= min_cluster_size + # check behaviour is the same when min_cluster_size is a fraction + clust_frac = OPTICS( + min_samples=9, + min_cluster_size=min_cluster_size / redX.shape[0], + ) + clust_frac.fit(redX) + assert_array_equal(clust.labels_, clust_frac.labels_) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_min_cluster_size_invalid2(csr_container): + clust = OPTICS(min_cluster_size=len(X) + 1) + with pytest.raises(ValueError, match="must be no greater than the "): + clust.fit(X) + + clust = OPTICS(min_cluster_size=len(X) + 1, metric="euclidean") + with pytest.raises(ValueError, match="must be no greater than the "): + clust.fit(csr_container(X)) + + +def 
test_processing_order(): + # Ensure that we consider all unprocessed points, + # not only direct neighbors. when picking the next point. + Y = [[0], [10], [-10], [25]] + + clust = OPTICS(min_samples=3, max_eps=15).fit(Y) + assert_array_equal(clust.reachability_, [np.inf, 10, 10, 15]) + assert_array_equal(clust.core_distances_, [10, 15, np.inf, np.inf]) + assert_array_equal(clust.ordering_, [0, 1, 2, 3]) + + +def test_compare_to_ELKI(): + # Expected values, computed with (future) ELKI 0.7.5 using: + # java -jar elki.jar cli -dbc.in csv -dbc.filter FixedDBIDsFilter + # -algorithm clustering.optics.OPTICSHeap -optics.minpts 5 + # where the FixedDBIDsFilter gives 0-indexed ids. + r1 = [ + np.inf, + 1.0574896366427478, + 0.7587934993548423, + 0.7290174038973836, + 0.7290174038973836, + 0.7290174038973836, + 0.6861627576116127, + 0.7587934993548423, + 0.9280118450166668, + 1.1748022534146194, + 3.3355455741292257, + 0.49618389254482587, + 0.2552805046961355, + 0.2552805046961355, + 0.24944622248445714, + 0.24944622248445714, + 0.24944622248445714, + 0.2552805046961355, + 0.2552805046961355, + 0.3086779122185853, + 4.163024452756142, + 1.623152630340929, + 0.45315840475822655, + 0.25468325192031926, + 0.2254004358159971, + 0.18765711877083036, + 0.1821471333893275, + 0.1821471333893275, + 0.18765711877083036, + 0.18765711877083036, + 0.2240202988740153, + 1.154337614548715, + 1.342604473837069, + 1.323308536402633, + 0.8607514948648837, + 0.27219111215810565, + 0.13260875220533205, + 0.13260875220533205, + 0.09890587675958984, + 0.09890587675958984, + 0.13548790801634494, + 0.1575483940837384, + 0.17515137170530226, + 0.17575920159442388, + 0.27219111215810565, + 0.6101447895405373, + 1.3189208094864302, + 1.323308536402633, + 2.2509184159764577, + 2.4517810628594527, + 3.675977064404973, + 3.8264795626020365, + 2.9130735341510614, + 2.9130735341510614, + 2.9130735341510614, + 2.9130735341510614, + 2.8459300127258036, + 2.8459300127258036, + 2.8459300127258036, + 3.0321982337972537, + ] + o1 = [ + 0, + 3, + 6, + 4, + 7, + 8, + 2, + 9, + 5, + 1, + 31, + 30, + 32, + 34, + 33, + 38, + 39, + 35, + 37, + 36, + 44, + 21, + 23, + 24, + 22, + 25, + 27, + 29, + 26, + 28, + 20, + 40, + 45, + 46, + 10, + 15, + 11, + 13, + 17, + 19, + 18, + 12, + 16, + 14, + 47, + 49, + 43, + 48, + 42, + 41, + 53, + 57, + 51, + 52, + 56, + 59, + 54, + 55, + 58, + 50, + ] + p1 = [ + -1, + 0, + 3, + 6, + 6, + 6, + 8, + 3, + 7, + 5, + 1, + 31, + 30, + 30, + 34, + 34, + 34, + 32, + 32, + 37, + 36, + 44, + 21, + 23, + 24, + 22, + 25, + 25, + 22, + 22, + 22, + 21, + 40, + 45, + 46, + 10, + 15, + 15, + 13, + 13, + 15, + 11, + 19, + 15, + 10, + 47, + 12, + 45, + 14, + 43, + 42, + 53, + 57, + 57, + 57, + 57, + 59, + 59, + 59, + 58, + ] + + # Tests against known extraction array + # Does NOT work with metric='euclidean', because sklearn euclidean has + # worse numeric precision. 'minkowski' is slower but more accurate. 
+ clust1 = OPTICS(min_samples=5).fit(X) + + assert_array_equal(clust1.ordering_, np.array(o1)) + assert_array_equal(clust1.predecessor_[clust1.ordering_], np.array(p1)) + assert_allclose(clust1.reachability_[clust1.ordering_], np.array(r1)) + # ELKI currently does not print the core distances (which are not used much + # in literature, but we can at least ensure to have this consistency: + for i in clust1.ordering_[1:]: + assert clust1.reachability_[i] >= clust1.core_distances_[clust1.predecessor_[i]] + + # Expected values, computed with (future) ELKI 0.7.5 using + r2 = [ + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + 0.27219111215810565, + 0.13260875220533205, + 0.13260875220533205, + 0.09890587675958984, + 0.09890587675958984, + 0.13548790801634494, + 0.1575483940837384, + 0.17515137170530226, + 0.17575920159442388, + 0.27219111215810565, + 0.4928068613197889, + np.inf, + 0.2666183922512113, + 0.18765711877083036, + 0.1821471333893275, + 0.1821471333893275, + 0.1821471333893275, + 0.18715928772277457, + 0.18765711877083036, + 0.18765711877083036, + 0.25468325192031926, + np.inf, + 0.2552805046961355, + 0.2552805046961355, + 0.24944622248445714, + 0.24944622248445714, + 0.24944622248445714, + 0.2552805046961355, + 0.2552805046961355, + 0.3086779122185853, + 0.34466409325984865, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + ] + o2 = [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 15, + 11, + 13, + 17, + 19, + 18, + 12, + 16, + 14, + 47, + 46, + 20, + 22, + 25, + 23, + 27, + 29, + 24, + 26, + 28, + 21, + 30, + 32, + 34, + 33, + 38, + 39, + 35, + 37, + 36, + 31, + 40, + 41, + 42, + 43, + 44, + 45, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + ] + p2 = [ + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + 10, + 15, + 15, + 13, + 13, + 15, + 11, + 19, + 15, + 10, + 47, + -1, + 20, + 22, + 25, + 25, + 25, + 25, + 22, + 22, + 23, + -1, + 30, + 30, + 34, + 34, + 34, + 32, + 32, + 37, + 38, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + ] + clust2 = OPTICS(min_samples=5, max_eps=0.5).fit(X) + + assert_array_equal(clust2.ordering_, np.array(o2)) + assert_array_equal(clust2.predecessor_[clust2.ordering_], np.array(p2)) + assert_allclose(clust2.reachability_[clust2.ordering_], np.array(r2)) + + index = np.where(clust1.core_distances_ <= 0.5)[0] + assert_allclose(clust1.core_distances_[index], clust2.core_distances_[index]) + + +def test_extract_dbscan(global_dtype): + # testing an easy dbscan case. Not including clusters with different + # densities. 
+ rng = np.random.RandomState(0) + n_points_per_cluster = 20 + C1 = [-5, -2] + 0.2 * rng.randn(n_points_per_cluster, 2) + C2 = [4, -1] + 0.2 * rng.randn(n_points_per_cluster, 2) + C3 = [1, 2] + 0.2 * rng.randn(n_points_per_cluster, 2) + C4 = [-2, 3] + 0.2 * rng.randn(n_points_per_cluster, 2) + X = np.vstack((C1, C2, C3, C4)).astype(global_dtype, copy=False) + + clust = OPTICS(cluster_method="dbscan", eps=0.5).fit(X) + assert_array_equal(np.sort(np.unique(clust.labels_)), [0, 1, 2, 3]) + + +@pytest.mark.parametrize("csr_container", [None] + CSR_CONTAINERS) +def test_precomputed_dists(global_dtype, csr_container): + redX = X[::2].astype(global_dtype, copy=False) + dists = pairwise_distances(redX, metric="euclidean") + dists = csr_container(dists) if csr_container is not None else dists + with warnings.catch_warnings(): + warnings.simplefilter("ignore", EfficiencyWarning) + clust1 = OPTICS(min_samples=10, algorithm="brute", metric="precomputed").fit( + dists + ) + clust2 = OPTICS(min_samples=10, algorithm="brute", metric="euclidean").fit(redX) + + assert_allclose(clust1.reachability_, clust2.reachability_) + assert_array_equal(clust1.labels_, clust2.labels_) + + +def test_optics_predecessor_correction_ordering(): + """Check that cluster correction using predecessor is working as expected. + + In the following example, the predecessor correction was not working properly + since it was not using the right indices. + + This non-regression test check that reordering the data does not change the results. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/26324 + """ + X_1 = np.array([1, 2, 3, 1, 8, 8, 7, 100]).reshape(-1, 1) + reorder = [0, 1, 2, 4, 5, 6, 7, 3] + X_2 = X_1[reorder] + + optics_1 = OPTICS(min_samples=3, metric="euclidean").fit(X_1) + optics_2 = OPTICS(min_samples=3, metric="euclidean").fit(X_2) + + assert_array_equal(optics_1.labels_[reorder], optics_2.labels_) diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_spectral.py b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..682df64044bf9d97cd4a16f8f69a32f41084d3f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/tests/test_spectral.py @@ -0,0 +1,334 @@ +"""Testing for Spectral Clustering methods""" +import pickle +import re + +import numpy as np +import pytest +from scipy.linalg import LinAlgError + +from sklearn.cluster import SpectralClustering, spectral_clustering +from sklearn.cluster._spectral import cluster_qr, discretize +from sklearn.datasets import make_blobs +from sklearn.feature_extraction import img_to_graph +from sklearn.metrics import adjusted_rand_score +from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel +from sklearn.neighbors import NearestNeighbors +from sklearn.utils import check_random_state +from sklearn.utils._testing import assert_array_equal +from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS + +try: + from pyamg import smoothed_aggregation_solver # noqa + + amg_loaded = True +except ImportError: + amg_loaded = False + +centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10 +X, _ = make_blobs( + n_samples=60, + n_features=2, + centers=centers, + cluster_std=0.4, + shuffle=True, + random_state=0, +) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +@pytest.mark.parametrize("eigen_solver", ("arpack", "lobpcg")) +@pytest.mark.parametrize("assign_labels", ("kmeans", "discretize", "cluster_qr")) +def 
test_spectral_clustering(eigen_solver, assign_labels, csr_container): + S = np.array( + [ + [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0], + [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0], + [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0], + [0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0], + ] + ) + + for mat in (S, csr_container(S)): + model = SpectralClustering( + random_state=0, + n_clusters=2, + affinity="precomputed", + eigen_solver=eigen_solver, + assign_labels=assign_labels, + ).fit(mat) + labels = model.labels_ + if labels[0] == 0: + labels = 1 - labels + + assert adjusted_rand_score(labels, [1, 1, 1, 0, 0, 0, 0]) == 1 + + model_copy = pickle.loads(pickle.dumps(model)) + assert model_copy.n_clusters == model.n_clusters + assert model_copy.eigen_solver == model.eigen_solver + assert_array_equal(model_copy.labels_, model.labels_) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +@pytest.mark.parametrize("assign_labels", ("kmeans", "discretize", "cluster_qr")) +def test_spectral_clustering_sparse(assign_labels, coo_container): + X, y = make_blobs( + n_samples=20, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01 + ) + + S = rbf_kernel(X, gamma=1) + S = np.maximum(S - 1e-4, 0) + S = coo_container(S) + + labels = ( + SpectralClustering( + random_state=0, + n_clusters=2, + affinity="precomputed", + assign_labels=assign_labels, + ) + .fit(S) + .labels_ + ) + assert adjusted_rand_score(y, labels) == 1 + + +def test_precomputed_nearest_neighbors_filtering(): + # Test precomputed graph filtering when containing too many neighbors + X, y = make_blobs( + n_samples=200, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01 + ) + + n_neighbors = 2 + results = [] + for additional_neighbors in [0, 10]: + nn = NearestNeighbors(n_neighbors=n_neighbors + additional_neighbors).fit(X) + graph = nn.kneighbors_graph(X, mode="connectivity") + labels = ( + SpectralClustering( + random_state=0, + n_clusters=2, + affinity="precomputed_nearest_neighbors", + n_neighbors=n_neighbors, + ) + .fit(graph) + .labels_ + ) + results.append(labels) + + assert_array_equal(results[0], results[1]) + + +def test_affinities(): + # Note: in the following, random_state has been selected to have + # a dataset that yields a stable eigen decomposition both when built + # on OSX and Linux + X, y = make_blobs( + n_samples=20, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01 + ) + # nearest neighbors affinity + sp = SpectralClustering(n_clusters=2, affinity="nearest_neighbors", random_state=0) + with pytest.warns(UserWarning, match="not fully connected"): + sp.fit(X) + assert adjusted_rand_score(y, sp.labels_) == 1 + + sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0) + labels = sp.fit(X).labels_ + assert adjusted_rand_score(y, labels) == 1 + + X = check_random_state(10).rand(10, 5) * 10 + + kernels_available = kernel_metrics() + for kern in kernels_available: + # Additive chi^2 gives a negative similarity matrix which + # doesn't make sense for spectral clustering + if kern != "additive_chi2": + sp = SpectralClustering(n_clusters=2, affinity=kern, random_state=0) + labels = sp.fit(X).labels_ + assert (X.shape[0],) == labels.shape + + sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1, random_state=0) + labels = sp.fit(X).labels_ + assert (X.shape[0],) == labels.shape + + def histogram(x, y, **kwargs): + # Histogram kernel implemented as a callable. 
+ assert kwargs == {} # no kernel_params that we didn't ask for + return np.minimum(x, y).sum() + + sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0) + labels = sp.fit(X).labels_ + assert (X.shape[0],) == labels.shape + + +def test_cluster_qr(): + # cluster_qr by itself should not be used for clustering generic data + # other than the rows of the eigenvectors within spectral clustering, + # but cluster_qr must still preserve the labels for different dtypes + # of the generic fixed input even if the labels may be meaningless. + random_state = np.random.RandomState(seed=8) + n_samples, n_components = 10, 5 + data = random_state.randn(n_samples, n_components) + labels_float64 = cluster_qr(data.astype(np.float64)) + # Each sample is assigned a cluster identifier + assert labels_float64.shape == (n_samples,) + # All components should be covered by the assignment + assert np.array_equal(np.unique(labels_float64), np.arange(n_components)) + # Single precision data should yield the same cluster assignments + labels_float32 = cluster_qr(data.astype(np.float32)) + assert np.array_equal(labels_float64, labels_float32) + + +def test_cluster_qr_permutation_invariance(): + # cluster_qr must be invariant to sample permutation. + random_state = np.random.RandomState(seed=8) + n_samples, n_components = 100, 5 + data = random_state.randn(n_samples, n_components) + perm = random_state.permutation(n_samples) + assert np.array_equal( + cluster_qr(data)[perm], + cluster_qr(data[perm]), + ) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +@pytest.mark.parametrize("n_samples", [50, 100, 150, 500]) +def test_discretize(n_samples, coo_container): + # Test the discretize using a noise assignment matrix + random_state = np.random.RandomState(seed=8) + for n_class in range(2, 10): + # random class labels + y_true = random_state.randint(0, n_class + 1, n_samples) + y_true = np.array(y_true, float) + # noise class assignment matrix + y_indicator = coo_container( + (np.ones(n_samples), (np.arange(n_samples), y_true)), + shape=(n_samples, n_class + 1), + ) + y_true_noisy = y_indicator.toarray() + 0.1 * random_state.randn( + n_samples, n_class + 1 + ) + y_pred = discretize(y_true_noisy, random_state=random_state) + assert adjusted_rand_score(y_true, y_pred) > 0.8 + + +# TODO: Remove when pyamg does replaces sp.rand call with np.random.rand +# https://github.com/scikit-learn/scikit-learn/issues/15913 +@pytest.mark.filterwarnings( + "ignore:scipy.rand is deprecated:DeprecationWarning:pyamg.*" +) +# TODO: Remove when pyamg removes the use of np.float +@pytest.mark.filterwarnings( + "ignore:`np.float` is a deprecated alias:DeprecationWarning:pyamg.*" +) +# TODO: Remove when pyamg removes the use of pinv2 +@pytest.mark.filterwarnings( + "ignore:scipy.linalg.pinv2 is deprecated:DeprecationWarning:pyamg.*" +) +# TODO: Remove when pyamg removes the use of np.find_common_type +@pytest.mark.filterwarnings( + "ignore:np.find_common_type is deprecated:DeprecationWarning:pyamg.*" +) +def test_spectral_clustering_with_arpack_amg_solvers(): + # Test that spectral_clustering is the same for arpack and amg solver + # Based on toy example from plot_segmentation_toy.py + + # a small two coin image + x, y = np.indices((40, 40)) + + center1, center2 = (14, 12), (20, 25) + radius1, radius2 = 8, 7 + + circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1**2 + circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2**2 + + circles = circle1 | circle2 + mask = circles.copy() + img = 
circles.astype(float) + + graph = img_to_graph(img, mask=mask) + graph.data = np.exp(-graph.data / graph.data.std()) + + labels_arpack = spectral_clustering( + graph, n_clusters=2, eigen_solver="arpack", random_state=0 + ) + + assert len(np.unique(labels_arpack)) == 2 + + if amg_loaded: + labels_amg = spectral_clustering( + graph, n_clusters=2, eigen_solver="amg", random_state=0 + ) + assert adjusted_rand_score(labels_arpack, labels_amg) == 1 + else: + with pytest.raises(ValueError): + spectral_clustering(graph, n_clusters=2, eigen_solver="amg", random_state=0) + + +def test_n_components(): + # Test that after adding n_components, result is different and + # n_components = n_clusters by default + X, y = make_blobs( + n_samples=20, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01 + ) + sp = SpectralClustering(n_clusters=2, random_state=0) + labels = sp.fit(X).labels_ + # set n_components = n_cluster and test if result is the same + labels_same_ncomp = ( + SpectralClustering(n_clusters=2, n_components=2, random_state=0).fit(X).labels_ + ) + # test that n_components=n_clusters by default + assert_array_equal(labels, labels_same_ncomp) + + # test that n_components affect result + # n_clusters=8 by default, and set n_components=2 + labels_diff_ncomp = ( + SpectralClustering(n_components=2, random_state=0).fit(X).labels_ + ) + assert not np.array_equal(labels, labels_diff_ncomp) + + +@pytest.mark.parametrize("assign_labels", ("kmeans", "discretize", "cluster_qr")) +def test_verbose(assign_labels, capsys): + # Check verbose mode of KMeans for better coverage. + X, y = make_blobs( + n_samples=20, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01 + ) + + SpectralClustering(n_clusters=2, random_state=42, verbose=1).fit(X) + + captured = capsys.readouterr() + + assert re.search(r"Computing label assignment using", captured.out) + + if assign_labels == "kmeans": + assert re.search(r"Initialization complete", captured.out) + assert re.search(r"Iteration [0-9]+, inertia", captured.out) + + +def test_spectral_clustering_np_matrix_raises(): + """Check that spectral_clustering raises an informative error when passed + a np.matrix. See #10993""" + X = np.matrix([[0.0, 2.0], [2.0, 0.0]]) + + msg = r"np\.matrix is not supported. Please convert to a numpy array" + with pytest.raises(TypeError, match=msg): + spectral_clustering(X) + + +def test_spectral_clustering_not_infinite_loop(capsys, monkeypatch): + """Check that discretize raises LinAlgError when svd never converges. + + Non-regression test for #21380 + """ + + def new_svd(*args, **kwargs): + raise LinAlgError() + + monkeypatch.setattr(np.linalg, "svd", new_svd) + vectors = np.ones((10, 4)) + + with pytest.raises(LinAlgError, match="SVD did not converge"): + discretize(vectors) diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/__init__.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c730a71260808e76df302bda674232f06b53499e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/preprocessing/__init__.py @@ -0,0 +1,63 @@ +""" +The :mod:`sklearn.preprocessing` module includes scaling, centering, +normalization, binarization methods. 
+""" + +from ._data import ( + Binarizer, + KernelCenterer, + MaxAbsScaler, + MinMaxScaler, + Normalizer, + PowerTransformer, + QuantileTransformer, + RobustScaler, + StandardScaler, + add_dummy_feature, + binarize, + maxabs_scale, + minmax_scale, + normalize, + power_transform, + quantile_transform, + robust_scale, + scale, +) +from ._discretization import KBinsDiscretizer +from ._encoders import OneHotEncoder, OrdinalEncoder +from ._function_transformer import FunctionTransformer +from ._label import LabelBinarizer, LabelEncoder, MultiLabelBinarizer, label_binarize +from ._polynomial import PolynomialFeatures, SplineTransformer +from ._target_encoder import TargetEncoder + +__all__ = [ + "Binarizer", + "FunctionTransformer", + "KBinsDiscretizer", + "KernelCenterer", + "LabelBinarizer", + "LabelEncoder", + "MultiLabelBinarizer", + "MinMaxScaler", + "MaxAbsScaler", + "QuantileTransformer", + "Normalizer", + "OneHotEncoder", + "OrdinalEncoder", + "PowerTransformer", + "RobustScaler", + "SplineTransformer", + "StandardScaler", + "TargetEncoder", + "add_dummy_feature", + "PolynomialFeatures", + "binarize", + "normalize", + "scale", + "robust_scale", + "maxabs_scale", + "minmax_scale", + "label_binarize", + "quantile_transform", + "power_transform", +] diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/_csr_polynomial_expansion.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_csr_polynomial_expansion.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e780835f806e98022563be901427d7f75c3bdb00 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_csr_polynomial_expansion.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/_data.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_data.py new file mode 100644 index 0000000000000000000000000000000000000000..8ec8a840298f074f2556d375aba87e374906ddc9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_data.py @@ -0,0 +1,3618 @@ +# Authors: Alexandre Gramfort +# Mathieu Blondel +# Olivier Grisel +# Andreas Mueller +# Eric Martin +# Giorgio Patrini +# Eric Chang +# License: BSD 3 clause + + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import optimize, sparse, stats +from scipy.special import boxcox + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + OneToOneFeatureMixin, + TransformerMixin, + _fit_context, +) +from ..utils import _array_api, check_array +from ..utils._array_api import get_namespace +from ..utils._param_validation import Interval, Options, StrOptions, validate_params +from ..utils.extmath import _incremental_mean_and_var, row_norms +from ..utils.sparsefuncs import ( + incr_mean_variance_axis, + inplace_column_scale, + mean_variance_axis, + min_max_axis, +) +from ..utils.sparsefuncs_fast import ( + inplace_csr_row_normalize_l1, + inplace_csr_row_normalize_l2, +) +from ..utils.validation import ( + FLOAT_DTYPES, + _check_sample_weight, + check_is_fitted, + check_random_state, +) +from ._encoders import OneHotEncoder + +BOUNDS_THRESHOLD = 1e-7 + +__all__ = [ + "Binarizer", + "KernelCenterer", + "MinMaxScaler", + "MaxAbsScaler", + "Normalizer", + "OneHotEncoder", + "RobustScaler", + "StandardScaler", + "QuantileTransformer", + "PowerTransformer", + "add_dummy_feature", + "binarize", + "normalize", + "scale", + "robust_scale", + "maxabs_scale", + 
"minmax_scale", + "quantile_transform", + "power_transform", +] + + +def _is_constant_feature(var, mean, n_samples): + """Detect if a feature is indistinguishable from a constant feature. + + The detection is based on its computed variance and on the theoretical + error bounds of the '2 pass algorithm' for variance computation. + + See "Algorithms for computing the sample variance: analysis and + recommendations", by Chan, Golub, and LeVeque. + """ + # In scikit-learn, variance is always computed using float64 accumulators. + eps = np.finfo(np.float64).eps + + upper_bound = n_samples * eps * var + (n_samples * mean * eps) ** 2 + return var <= upper_bound + + +def _handle_zeros_in_scale(scale, copy=True, constant_mask=None): + """Set scales of near constant features to 1. + + The goal is to avoid division by very small or zero values. + + Near constant features are detected automatically by identifying + scales close to machine precision unless they are precomputed by + the caller and passed with the `constant_mask` kwarg. + + Typically for standard scaling, the scales are the standard + deviation while near constant features are better detected on the + computed variances which are closer to machine precision by + construction. + """ + # if we are fitting on 1D arrays, scale might be a scalar + if np.isscalar(scale): + if scale == 0.0: + scale = 1.0 + return scale + # scale is an array + else: + xp, _ = get_namespace(scale) + if constant_mask is None: + # Detect near constant values to avoid dividing by a very small + # value that could lead to surprising results and numerical + # stability issues. + constant_mask = scale < 10 * xp.finfo(scale.dtype).eps + + if copy: + # New array to avoid side-effects + scale = xp.asarray(scale, copy=True) + scale[constant_mask] = 1.0 + return scale + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "axis": [Options(Integral, {0, 1})], + "with_mean": ["boolean"], + "with_std": ["boolean"], + "copy": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def scale(X, *, axis=0, with_mean=True, with_std=True, copy=True): + """Standardize a dataset along any axis. + + Center to the mean and component wise scale to unit variance. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to center and scale. + + axis : {0, 1}, default=0 + Axis used to compute the means and standard deviations along. If 0, + independently standardize each feature, otherwise (if 1) standardize + each sample. + + with_mean : bool, default=True + If True, center the data before scaling. + + with_std : bool, default=True + If True, scale the data to unit variance (or equivalently, + unit standard deviation). + + copy : bool, default=True + If False, try to avoid a copy and scale in place. + This is not guaranteed to always work in place; e.g. if the data is + a numpy array with an int dtype, a copy will be returned even with + copy=False. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + The transformed data. + + See Also + -------- + StandardScaler : Performs scaling to unit variance using the Transformer + API (e.g. as part of a preprocessing + :class:`~sklearn.pipeline.Pipeline`). + + Notes + ----- + This implementation will refuse to center scipy.sparse matrices + since it would make them non-sparse and would potentially crash the + program with memory exhaustion problems. 
+ + Instead the caller is expected to either set explicitly + `with_mean=False` (in that case, only variance scaling will be + performed on the features of the CSC matrix) or to call `X.toarray()` + if he/she expects the materialized dense array to fit in memory. + + To avoid memory copy the caller should pass a CSC matrix. + + NaNs are treated as missing values: disregarded to compute the statistics, + and maintained during the data transformation. + + We use a biased estimator for the standard deviation, equivalent to + `numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to + affect model performance. + + For a comparison of the different scalers, transformers, and normalizers, + see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. + + .. warning:: Risk of data leak + + Do not use :func:`~sklearn.preprocessing.scale` unless you know + what you are doing. A common mistake is to apply it to the entire data + *before* splitting into training and test sets. This will bias the + model evaluation because information would have leaked from the test + set to the training set. + In general, we recommend using + :class:`~sklearn.preprocessing.StandardScaler` within a + :ref:`Pipeline ` in order to prevent most risks of data + leaking: `pipe = make_pipeline(StandardScaler(), LogisticRegression())`. + + Examples + -------- + >>> from sklearn.preprocessing import scale + >>> X = [[-2, 1, 2], [-1, 0, 1]] + >>> scale(X, axis=0) # scaling each column independently + array([[-1., 1., 1.], + [ 1., -1., -1.]]) + >>> scale(X, axis=1) # scaling each row independently + array([[-1.37..., 0.39..., 0.98...], + [-1.22..., 0. , 1.22...]]) + """ + X = check_array( + X, + accept_sparse="csc", + copy=copy, + ensure_2d=False, + estimator="the scale function", + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + ) + if sparse.issparse(X): + if with_mean: + raise ValueError( + "Cannot center sparse matrices: pass `with_mean=False` instead" + " See docstring for motivation and alternatives." + ) + if axis != 0: + raise ValueError( + "Can only scale sparse matrix on axis=0, got axis=%d" % axis + ) + if with_std: + _, var = mean_variance_axis(X, axis=0) + var = _handle_zeros_in_scale(var, copy=False) + inplace_column_scale(X, 1 / np.sqrt(var)) + else: + X = np.asarray(X) + if with_mean: + mean_ = np.nanmean(X, axis) + if with_std: + scale_ = np.nanstd(X, axis) + # Xr is a view on the original array that enables easy use of + # broadcasting on the axis in which we are interested in + Xr = np.rollaxis(X, axis) + if with_mean: + Xr -= mean_ + mean_1 = np.nanmean(Xr, axis=0) + # Verify that mean_1 is 'close to zero'. If X contains very + # large values, mean_1 can also be very large, due to a lack of + # precision of mean_. In this case, a pre-scaling of the + # concerned feature is efficient, for instance by its mean or + # maximum. + if not np.allclose(mean_1, 0): + warnings.warn( + "Numerical issues were encountered " + "when centering the data " + "and might not be solved. Dataset may " + "contain too large values. You may need " + "to prescale your features." + ) + Xr -= mean_1 + if with_std: + scale_ = _handle_zeros_in_scale(scale_, copy=False) + Xr /= scale_ + if with_mean: + mean_2 = np.nanmean(Xr, axis=0) + # If mean_2 is not 'close to zero', it comes from the fact that + # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even + # if mean_1 was close to zero. The problem is thus essentially + # due to the lack of precision of mean_. 
A solution is then to + # subtract the mean again: + if not np.allclose(mean_2, 0): + warnings.warn( + "Numerical issues were encountered " + "when scaling the data " + "and might not be solved. The standard " + "deviation of the data is probably " + "very close to 0. " + ) + Xr -= mean_2 + return X + + +class MinMaxScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): + """Transform features by scaling each feature to a given range. + + This estimator scales and translates each feature individually such + that it is in the given range on the training set, e.g. between + zero and one. + + The transformation is given by:: + + X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) + X_scaled = X_std * (max - min) + min + + where min, max = feature_range. + + This transformation is often used as an alternative to zero mean, + unit variance scaling. + + `MinMaxScaler` doesn't reduce the effect of outliers, but it linearly + scales them down into a fixed range, where the largest occurring data point + corresponds to the maximum value and the smallest one corresponds to the + minimum value. For an example visualization, refer to :ref:`Compare + MinMaxScaler with other scalers `. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + feature_range : tuple (min, max), default=(0, 1) + Desired range of transformed data. + + copy : bool, default=True + Set to False to perform inplace row normalization and avoid a + copy (if the input is already a numpy array). + + clip : bool, default=False + Set to True to clip transformed values of held-out data to + provided `feature range`. + + .. versionadded:: 0.24 + + Attributes + ---------- + min_ : ndarray of shape (n_features,) + Per feature adjustment for minimum. Equivalent to + ``min - X.min(axis=0) * self.scale_`` + + scale_ : ndarray of shape (n_features,) + Per feature relative scaling of the data. Equivalent to + ``(max - min) / (X.max(axis=0) - X.min(axis=0))`` + + .. versionadded:: 0.17 + *scale_* attribute. + + data_min_ : ndarray of shape (n_features,) + Per feature minimum seen in the data + + .. versionadded:: 0.17 + *data_min_* + + data_max_ : ndarray of shape (n_features,) + Per feature maximum seen in the data + + .. versionadded:: 0.17 + *data_max_* + + data_range_ : ndarray of shape (n_features,) + Per feature range ``(data_max_ - data_min_)`` seen in the data + + .. versionadded:: 0.17 + *data_range_* + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + n_samples_seen_ : int + The number of samples processed by the estimator. + It will be reset on new calls to fit, but increments across + ``partial_fit`` calls. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + minmax_scale : Equivalent function without the estimator API. + + Notes + ----- + NaNs are treated as missing values: disregarded in fit, and maintained in + transform. + + Examples + -------- + >>> from sklearn.preprocessing import MinMaxScaler + >>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]] + >>> scaler = MinMaxScaler() + >>> print(scaler.fit(data)) + MinMaxScaler() + >>> print(scaler.data_max_) + [ 1. 18.] + >>> print(scaler.transform(data)) + [[0. 0. ] + [0.25 0.25] + [0.5 0.5 ] + [1. 1. ]] + >>> print(scaler.transform([[2, 2]])) + [[1.5 0. 
]] + """ + + _parameter_constraints: dict = { + "feature_range": [tuple], + "copy": ["boolean"], + "clip": ["boolean"], + } + + def __init__(self, feature_range=(0, 1), *, copy=True, clip=False): + self.feature_range = feature_range + self.copy = copy + self.clip = clip + + def _reset(self): + """Reset internal data-dependent state of the scaler, if necessary. + + __init__ parameters are not touched. + """ + # Checking one attribute is enough, because they are all set together + # in partial_fit + if hasattr(self, "scale_"): + del self.scale_ + del self.min_ + del self.n_samples_seen_ + del self.data_min_ + del self.data_max_ + del self.data_range_ + + def fit(self, X, y=None): + """Compute the minimum and maximum to be used for later scaling. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data used to compute the per-feature minimum and maximum + used for later scaling along the features axis. + + y : None + Ignored. + + Returns + ------- + self : object + Fitted scaler. + """ + # Reset internal state before fitting + self._reset() + return self.partial_fit(X, y) + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None): + """Online computation of min and max on X for later scaling. + + All of X is processed as a single batch. This is intended for cases + when :meth:`fit` is not feasible due to very large number of + `n_samples` or because X is read from a continuous stream. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data used to compute the mean and standard deviation + used for later scaling along the features axis. + + y : None + Ignored. + + Returns + ------- + self : object + Fitted scaler. + """ + feature_range = self.feature_range + if feature_range[0] >= feature_range[1]: + raise ValueError( + "Minimum of desired feature range must be smaller than maximum. Got %s." + % str(feature_range) + ) + + if sparse.issparse(X): + raise TypeError( + "MinMaxScaler does not support sparse input. " + "Consider using MaxAbsScaler instead." + ) + + xp, _ = get_namespace(X) + + first_pass = not hasattr(self, "n_samples_seen_") + X = self._validate_data( + X, + reset=first_pass, + dtype=_array_api.supported_float_dtypes(xp), + force_all_finite="allow-nan", + ) + + data_min = _array_api._nanmin(X, axis=0) + data_max = _array_api._nanmax(X, axis=0) + + if first_pass: + self.n_samples_seen_ = X.shape[0] + else: + data_min = xp.minimum(self.data_min_, data_min) + data_max = xp.maximum(self.data_max_, data_max) + self.n_samples_seen_ += X.shape[0] + + data_range = data_max - data_min + self.scale_ = (feature_range[1] - feature_range[0]) / _handle_zeros_in_scale( + data_range, copy=True + ) + self.min_ = feature_range[0] - data_min * self.scale_ + self.data_min_ = data_min + self.data_max_ = data_max + self.data_range_ = data_range + return self + + def transform(self, X): + """Scale features of X according to feature_range. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data that will be transformed. + + Returns + ------- + Xt : ndarray of shape (n_samples, n_features) + Transformed data. 
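+
+        A minimal illustration (an assumed toy scaler fitted on a single
+        feature spanning [0, 4]):
+
+        >>> from sklearn.preprocessing import MinMaxScaler
+        >>> scaler = MinMaxScaler().fit([[0.0], [4.0]])
+        >>> scaler.transform([[2.0]])
+        array([[0.5]])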
+ """ + check_is_fitted(self) + + xp, _ = get_namespace(X) + + X = self._validate_data( + X, + copy=self.copy, + dtype=_array_api.supported_float_dtypes(xp), + force_all_finite="allow-nan", + reset=False, + ) + + X *= self.scale_ + X += self.min_ + if self.clip: + xp.clip(X, self.feature_range[0], self.feature_range[1], out=X) + return X + + def inverse_transform(self, X): + """Undo the scaling of X according to feature_range. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data that will be transformed. It cannot be sparse. + + Returns + ------- + Xt : ndarray of shape (n_samples, n_features) + Transformed data. + """ + check_is_fitted(self) + + xp, _ = get_namespace(X) + + X = check_array( + X, + copy=self.copy, + dtype=_array_api.supported_float_dtypes(xp), + force_all_finite="allow-nan", + ) + + X -= self.min_ + X /= self.scale_ + return X + + def _more_tags(self): + return {"allow_nan": True} + + +@validate_params( + { + "X": ["array-like"], + "axis": [Options(Integral, {0, 1})], + }, + prefer_skip_nested_validation=False, +) +def minmax_scale(X, feature_range=(0, 1), *, axis=0, copy=True): + """Transform features by scaling each feature to a given range. + + This estimator scales and translates each feature individually such + that it is in the given range on the training set, i.e. between + zero and one. + + The transformation is given by (when ``axis=0``):: + + X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) + X_scaled = X_std * (max - min) + min + + where min, max = feature_range. + + The transformation is calculated as (when ``axis=0``):: + + X_scaled = scale * X + min - X.min(axis=0) * scale + where scale = (max - min) / (X.max(axis=0) - X.min(axis=0)) + + This transformation is often used as an alternative to zero mean, + unit variance scaling. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.17 + *minmax_scale* function interface + to :class:`~sklearn.preprocessing.MinMaxScaler`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data. + + feature_range : tuple (min, max), default=(0, 1) + Desired range of transformed data. + + axis : {0, 1}, default=0 + Axis used to scale along. If 0, independently scale each feature, + otherwise (if 1) scale each sample. + + copy : bool, default=True + If False, try to avoid a copy and scale in place. + This is not guaranteed to always work in place; e.g. if the data is + a numpy array with an int dtype, a copy will be returned even with + copy=False. + + Returns + ------- + X_tr : ndarray of shape (n_samples, n_features) + The transformed data. + + .. warning:: Risk of data leak + + Do not use :func:`~sklearn.preprocessing.minmax_scale` unless you know + what you are doing. A common mistake is to apply it to the entire data + *before* splitting into training and test sets. This will bias the + model evaluation because information would have leaked from the test + set to the training set. + In general, we recommend using + :class:`~sklearn.preprocessing.MinMaxScaler` within a + :ref:`Pipeline ` in order to prevent most risks of data + leaking: `pipe = make_pipeline(MinMaxScaler(), LogisticRegression())`. + + See Also + -------- + MinMaxScaler : Performs scaling to a given range using the Transformer + API (e.g. as part of a preprocessing + :class:`~sklearn.pipeline.Pipeline`). + + Notes + ----- + For a comparison of the different scalers, transformers, and normalizers, + see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. 
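+
+    For 2D input and ``axis=0``, the result matches the estimator API
+    (a small illustrative check):
+
+    >>> import numpy as np
+    >>> from sklearn.preprocessing import MinMaxScaler, minmax_scale
+    >>> X = np.array([[-2.0, 1.0, 2.0], [-1.0, 0.0, 1.0]])
+    >>> np.allclose(minmax_scale(X), MinMaxScaler().fit_transform(X))
+    True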
+ + Examples + -------- + >>> from sklearn.preprocessing import minmax_scale + >>> X = [[-2, 1, 2], [-1, 0, 1]] + >>> minmax_scale(X, axis=0) # scale each column independently + array([[0., 1., 1.], + [1., 0., 0.]]) + >>> minmax_scale(X, axis=1) # scale each row independently + array([[0. , 0.75, 1. ], + [0. , 0.5 , 1. ]]) + """ + # Unlike the scaler object, this function allows 1d input. + # If copy is required, it will be done inside the scaler object. + X = check_array( + X, copy=False, ensure_2d=False, dtype=FLOAT_DTYPES, force_all_finite="allow-nan" + ) + original_ndim = X.ndim + + if original_ndim == 1: + X = X.reshape(X.shape[0], 1) + + s = MinMaxScaler(feature_range=feature_range, copy=copy) + if axis == 0: + X = s.fit_transform(X) + else: + X = s.fit_transform(X.T).T + + if original_ndim == 1: + X = X.ravel() + + return X + + +class StandardScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): + """Standardize features by removing the mean and scaling to unit variance. + + The standard score of a sample `x` is calculated as: + + z = (x - u) / s + + where `u` is the mean of the training samples or zero if `with_mean=False`, + and `s` is the standard deviation of the training samples or one if + `with_std=False`. + + Centering and scaling happen independently on each feature by computing + the relevant statistics on the samples in the training set. Mean and + standard deviation are then stored to be used on later data using + :meth:`transform`. + + Standardization of a dataset is a common requirement for many + machine learning estimators: they might behave badly if the + individual features do not more or less look like standard normally + distributed data (e.g. Gaussian with 0 mean and unit variance). + + For instance many elements used in the objective function of + a learning algorithm (such as the RBF kernel of Support Vector + Machines or the L1 and L2 regularizers of linear models) assume that + all features are centered around 0 and have variance in the same + order. If a feature has a variance that is orders of magnitude larger + than others, it might dominate the objective function and make the + estimator unable to learn from other features correctly as expected. + + `StandardScaler` is sensitive to outliers, and the features may scale + differently from each other in the presence of outliers. For an example + visualization, refer to :ref:`Compare StandardScaler with other scalers + `. + + This scaler can also be applied to sparse CSR or CSC matrices by passing + `with_mean=False` to avoid breaking the sparsity structure of the data. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + copy : bool, default=True + If False, try to avoid a copy and do inplace scaling instead. + This is not guaranteed to always work inplace; e.g. if the data is + not a NumPy array or scipy.sparse CSR matrix, a copy may still be + returned. + + with_mean : bool, default=True + If True, center the data before scaling. + This does not work (and will raise an exception) when attempted on + sparse matrices, because centering them entails building a dense + matrix which in common use cases is likely to be too large to fit in + memory. + + with_std : bool, default=True + If True, scale the data to unit variance (or equivalently, + unit standard deviation). + + Attributes + ---------- + scale_ : ndarray of shape (n_features,) or None + Per feature relative scaling of the data to achieve zero mean and unit + variance. Generally this is calculated using `np.sqrt(var_)`. 
If a + variance is zero, we can't achieve unit variance, and the data is left + as-is, giving a scaling factor of 1. `scale_` is equal to `None` + when `with_std=False`. + + .. versionadded:: 0.17 + *scale_* + + mean_ : ndarray of shape (n_features,) or None + The mean value for each feature in the training set. + Equal to ``None`` when ``with_mean=False`` and ``with_std=False``. + + var_ : ndarray of shape (n_features,) or None + The variance for each feature in the training set. Used to compute + `scale_`. Equal to ``None`` when ``with_mean=False`` and + ``with_std=False``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_seen_ : int or ndarray of shape (n_features,) + The number of samples processed by the estimator for each feature. + If there are no missing samples, the ``n_samples_seen`` will be an + integer, otherwise it will be an array of dtype int. If + `sample_weights` are used it will be a float (if no missing data) + or an array of dtype float that sums the weights seen so far. + Will be reset on new calls to fit, but increments across + ``partial_fit`` calls. + + See Also + -------- + scale : Equivalent function without the estimator API. + + :class:`~sklearn.decomposition.PCA` : Further removes the linear + correlation across features with 'whiten=True'. + + Notes + ----- + NaNs are treated as missing values: disregarded in fit, and maintained in + transform. + + We use a biased estimator for the standard deviation, equivalent to + `numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to + affect model performance. + + Examples + -------- + >>> from sklearn.preprocessing import StandardScaler + >>> data = [[0, 0], [0, 0], [1, 1], [1, 1]] + >>> scaler = StandardScaler() + >>> print(scaler.fit(data)) + StandardScaler() + >>> print(scaler.mean_) + [0.5 0.5] + >>> print(scaler.transform(data)) + [[-1. -1.] + [-1. -1.] + [ 1. 1.] + [ 1. 1.]] + >>> print(scaler.transform([[2, 2]])) + [[3. 3.]] + """ + + _parameter_constraints: dict = { + "copy": ["boolean"], + "with_mean": ["boolean"], + "with_std": ["boolean"], + } + + def __init__(self, *, copy=True, with_mean=True, with_std=True): + self.with_mean = with_mean + self.with_std = with_std + self.copy = copy + + def _reset(self): + """Reset internal data-dependent state of the scaler, if necessary. + + __init__ parameters are not touched. + """ + # Checking one attribute is enough, because they are all set together + # in partial_fit + if hasattr(self, "scale_"): + del self.scale_ + del self.n_samples_seen_ + del self.mean_ + del self.var_ + + def fit(self, X, y=None, sample_weight=None): + """Compute the mean and std to be used for later scaling. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to compute the mean and standard deviation + used for later scaling along the features axis. + + y : None + Ignored. + + sample_weight : array-like of shape (n_samples,), default=None + Individual weights for each sample. + + .. versionadded:: 0.24 + parameter *sample_weight* support to StandardScaler. + + Returns + ------- + self : object + Fitted scaler. 
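+
+        A small sketch of the ``sample_weight`` semantics (illustrative):
+        giving a sample an integer weight behaves like repeating that sample.
+
+        >>> import numpy as np
+        >>> from sklearn.preprocessing import StandardScaler
+        >>> a = StandardScaler().fit([[0.0], [2.0], [2.0]])
+        >>> b = StandardScaler().fit([[0.0], [2.0]], sample_weight=[1.0, 2.0])
+        >>> np.allclose(a.mean_, b.mean_) and np.allclose(a.var_, b.var_)
+        True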
+ """ + # Reset internal state before fitting + self._reset() + return self.partial_fit(X, y, sample_weight) + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None, sample_weight=None): + """Online computation of mean and std on X for later scaling. + + All of X is processed as a single batch. This is intended for cases + when :meth:`fit` is not feasible due to very large number of + `n_samples` or because X is read from a continuous stream. + + The algorithm for incremental mean and std is given in Equation 1.5a,b + in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms + for computing the sample variance: Analysis and recommendations." + The American Statistician 37.3 (1983): 242-247: + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to compute the mean and standard deviation + used for later scaling along the features axis. + + y : None + Ignored. + + sample_weight : array-like of shape (n_samples,), default=None + Individual weights for each sample. + + .. versionadded:: 0.24 + parameter *sample_weight* support to StandardScaler. + + Returns + ------- + self : object + Fitted scaler. + """ + first_call = not hasattr(self, "n_samples_seen_") + X = self._validate_data( + X, + accept_sparse=("csr", "csc"), + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + reset=first_call, + ) + n_features = X.shape[1] + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + # Even in the case of `with_mean=False`, we update the mean anyway + # This is needed for the incremental computation of the var + # See incr_mean_variance_axis and _incremental_mean_variance_axis + + # if n_samples_seen_ is an integer (i.e. no missing values), we need to + # transform it to a NumPy array of shape (n_features,) required by + # incr_mean_variance_axis and _incremental_variance_axis + dtype = np.int64 if sample_weight is None else X.dtype + if not hasattr(self, "n_samples_seen_"): + self.n_samples_seen_ = np.zeros(n_features, dtype=dtype) + elif np.size(self.n_samples_seen_) == 1: + self.n_samples_seen_ = np.repeat(self.n_samples_seen_, X.shape[1]) + self.n_samples_seen_ = self.n_samples_seen_.astype(dtype, copy=False) + + if sparse.issparse(X): + if self.with_mean: + raise ValueError( + "Cannot center sparse matrices: pass `with_mean=False` " + "instead. See docstring for motivation and alternatives." 
+ ) + sparse_constructor = ( + sparse.csr_matrix if X.format == "csr" else sparse.csc_matrix + ) + + if self.with_std: + # First pass + if not hasattr(self, "scale_"): + self.mean_, self.var_, self.n_samples_seen_ = mean_variance_axis( + X, axis=0, weights=sample_weight, return_sum_weights=True + ) + # Next passes + else: + ( + self.mean_, + self.var_, + self.n_samples_seen_, + ) = incr_mean_variance_axis( + X, + axis=0, + last_mean=self.mean_, + last_var=self.var_, + last_n=self.n_samples_seen_, + weights=sample_weight, + ) + # We force the mean and variance to float64 for large arrays + # See https://github.com/scikit-learn/scikit-learn/pull/12338 + self.mean_ = self.mean_.astype(np.float64, copy=False) + self.var_ = self.var_.astype(np.float64, copy=False) + else: + self.mean_ = None # as with_mean must be False for sparse + self.var_ = None + weights = _check_sample_weight(sample_weight, X) + sum_weights_nan = weights @ sparse_constructor( + (np.isnan(X.data), X.indices, X.indptr), shape=X.shape + ) + self.n_samples_seen_ += (np.sum(weights) - sum_weights_nan).astype( + dtype + ) + else: + # First pass + if not hasattr(self, "scale_"): + self.mean_ = 0.0 + if self.with_std: + self.var_ = 0.0 + else: + self.var_ = None + + if not self.with_mean and not self.with_std: + self.mean_ = None + self.var_ = None + self.n_samples_seen_ += X.shape[0] - np.isnan(X).sum(axis=0) + + else: + self.mean_, self.var_, self.n_samples_seen_ = _incremental_mean_and_var( + X, + self.mean_, + self.var_, + self.n_samples_seen_, + sample_weight=sample_weight, + ) + + # for backward-compatibility, reduce n_samples_seen_ to an integer + # if the number of samples is the same for each feature (i.e. no + # missing values) + if np.ptp(self.n_samples_seen_) == 0: + self.n_samples_seen_ = self.n_samples_seen_[0] + + if self.with_std: + # Extract the list of near constant features on the raw variances, + # before taking the square root. + constant_mask = _is_constant_feature( + self.var_, self.mean_, self.n_samples_seen_ + ) + self.scale_ = _handle_zeros_in_scale( + np.sqrt(self.var_), copy=False, constant_mask=constant_mask + ) + else: + self.scale_ = None + + return self + + def transform(self, X, copy=None): + """Perform standardization by centering and scaling. + + Parameters + ---------- + X : {array-like, sparse matrix of shape (n_samples, n_features) + The data used to scale along the features axis. + copy : bool, default=None + Copy the input X or not. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + Transformed array. + """ + check_is_fitted(self) + + copy = copy if copy is not None else self.copy + X = self._validate_data( + X, + reset=False, + accept_sparse="csr", + copy=copy, + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + ) + + if sparse.issparse(X): + if self.with_mean: + raise ValueError( + "Cannot center sparse matrices: pass `with_mean=False` " + "instead. See docstring for motivation and alternatives." + ) + if self.scale_ is not None: + inplace_column_scale(X, 1 / self.scale_) + else: + if self.with_mean: + X -= self.mean_ + if self.with_std: + X /= self.scale_ + return X + + def inverse_transform(self, X, copy=None): + """Scale back the data to the original representation. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to scale along the features axis. + copy : bool, default=None + Copy the input X or not. 
+ + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + Transformed array. + """ + check_is_fitted(self) + + copy = copy if copy is not None else self.copy + X = check_array( + X, + accept_sparse="csr", + copy=copy, + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + ) + + if sparse.issparse(X): + if self.with_mean: + raise ValueError( + "Cannot uncenter sparse matrices: pass `with_mean=False` " + "instead See docstring for motivation and alternatives." + ) + if self.scale_ is not None: + inplace_column_scale(X, self.scale_) + else: + if self.with_std: + X *= self.scale_ + if self.with_mean: + X += self.mean_ + return X + + def _more_tags(self): + return {"allow_nan": True, "preserves_dtype": [np.float64, np.float32]} + + +class MaxAbsScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): + """Scale each feature by its maximum absolute value. + + This estimator scales and translates each feature individually such + that the maximal absolute value of each feature in the + training set will be 1.0. It does not shift/center the data, and + thus does not destroy any sparsity. + + This scaler can also be applied to sparse CSR or CSC matrices. + + `MaxAbsScaler` doesn't reduce the effect of outliers; it only linearly + scales them down. For an example visualization, refer to :ref:`Compare + MaxAbsScaler with other scalers `. + + .. versionadded:: 0.17 + + Parameters + ---------- + copy : bool, default=True + Set to False to perform inplace scaling and avoid a copy (if the input + is already a numpy array). + + Attributes + ---------- + scale_ : ndarray of shape (n_features,) + Per feature relative scaling of the data. + + .. versionadded:: 0.17 + *scale_* attribute. + + max_abs_ : ndarray of shape (n_features,) + Per feature maximum absolute value. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_seen_ : int + The number of samples processed by the estimator. Will be reset on + new calls to fit, but increments across ``partial_fit`` calls. + + See Also + -------- + maxabs_scale : Equivalent function without the estimator API. + + Notes + ----- + NaNs are treated as missing values: disregarded in fit, and maintained in + transform. + + Examples + -------- + >>> from sklearn.preprocessing import MaxAbsScaler + >>> X = [[ 1., -1., 2.], + ... [ 2., 0., 0.], + ... [ 0., 1., -1.]] + >>> transformer = MaxAbsScaler().fit(X) + >>> transformer + MaxAbsScaler() + >>> transformer.transform(X) + array([[ 0.5, -1. , 1. ], + [ 1. , 0. , 0. ], + [ 0. , 1. , -0.5]]) + """ + + _parameter_constraints: dict = {"copy": ["boolean"]} + + def __init__(self, *, copy=True): + self.copy = copy + + def _reset(self): + """Reset internal data-dependent state of the scaler, if necessary. + + __init__ parameters are not touched. + """ + # Checking one attribute is enough, because they are all set together + # in partial_fit + if hasattr(self, "scale_"): + del self.scale_ + del self.n_samples_seen_ + del self.max_abs_ + + def fit(self, X, y=None): + """Compute the maximum absolute value to be used for later scaling. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to compute the per-feature minimum and maximum + used for later scaling along the features axis. 
+ + y : None + Ignored. + + Returns + ------- + self : object + Fitted scaler. + """ + # Reset internal state before fitting + self._reset() + return self.partial_fit(X, y) + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None): + """Online computation of max absolute value of X for later scaling. + + All of X is processed as a single batch. This is intended for cases + when :meth:`fit` is not feasible due to very large number of + `n_samples` or because X is read from a continuous stream. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to compute the mean and standard deviation + used for later scaling along the features axis. + + y : None + Ignored. + + Returns + ------- + self : object + Fitted scaler. + """ + xp, _ = get_namespace(X) + + first_pass = not hasattr(self, "n_samples_seen_") + X = self._validate_data( + X, + reset=first_pass, + accept_sparse=("csr", "csc"), + dtype=_array_api.supported_float_dtypes(xp), + force_all_finite="allow-nan", + ) + + if sparse.issparse(X): + mins, maxs = min_max_axis(X, axis=0, ignore_nan=True) + max_abs = np.maximum(np.abs(mins), np.abs(maxs)) + else: + max_abs = _array_api._nanmax(xp.abs(X), axis=0) + + if first_pass: + self.n_samples_seen_ = X.shape[0] + else: + max_abs = xp.maximum(self.max_abs_, max_abs) + self.n_samples_seen_ += X.shape[0] + + self.max_abs_ = max_abs + self.scale_ = _handle_zeros_in_scale(max_abs, copy=True) + return self + + def transform(self, X): + """Scale the data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data that should be scaled. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + Transformed array. + """ + check_is_fitted(self) + + xp, _ = get_namespace(X) + + X = self._validate_data( + X, + accept_sparse=("csr", "csc"), + copy=self.copy, + reset=False, + dtype=_array_api.supported_float_dtypes(xp), + force_all_finite="allow-nan", + ) + + if sparse.issparse(X): + inplace_column_scale(X, 1.0 / self.scale_) + else: + X /= self.scale_ + return X + + def inverse_transform(self, X): + """Scale back the data to the original representation. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data that should be transformed back. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + Transformed array. + """ + check_is_fitted(self) + + xp, _ = get_namespace(X) + + X = check_array( + X, + accept_sparse=("csr", "csc"), + copy=self.copy, + dtype=_array_api.supported_float_dtypes(xp), + force_all_finite="allow-nan", + ) + + if sparse.issparse(X): + inplace_column_scale(X, self.scale_) + else: + X *= self.scale_ + return X + + def _more_tags(self): + return {"allow_nan": True} + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "axis": [Options(Integral, {0, 1})], + }, + prefer_skip_nested_validation=False, +) +def maxabs_scale(X, *, axis=0, copy=True): + """Scale each feature to the [-1, 1] range without breaking the sparsity. + + This estimator scales each feature individually such + that the maximal absolute value of each feature in the + training set will be 1.0. + + This scaler can also be applied to sparse CSR or CSC matrices. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data. + + axis : {0, 1}, default=0 + Axis used to scale along. 
If 0, independently scale each feature, + otherwise (if 1) scale each sample. + + copy : bool, default=True + If False, try to avoid a copy and scale in place. + This is not guaranteed to always work in place; e.g. if the data is + a numpy array with an int dtype, a copy will be returned even with + copy=False. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + The transformed data. + + .. warning:: Risk of data leak + + Do not use :func:`~sklearn.preprocessing.maxabs_scale` unless you know + what you are doing. A common mistake is to apply it to the entire data + *before* splitting into training and test sets. This will bias the + model evaluation because information would have leaked from the test + set to the training set. + In general, we recommend using + :class:`~sklearn.preprocessing.MaxAbsScaler` within a + :ref:`Pipeline ` in order to prevent most risks of data + leaking: `pipe = make_pipeline(MaxAbsScaler(), LogisticRegression())`. + + See Also + -------- + MaxAbsScaler : Performs scaling to the [-1, 1] range using + the Transformer API (e.g. as part of a preprocessing + :class:`~sklearn.pipeline.Pipeline`). + + Notes + ----- + NaNs are treated as missing values: disregarded to compute the statistics, + and maintained during the data transformation. + + For a comparison of the different scalers, transformers, and normalizers, + see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. + + Examples + -------- + >>> from sklearn.preprocessing import maxabs_scale + >>> X = [[-2, 1, 2], [-1, 0, 1]] + >>> maxabs_scale(X, axis=0) # scale each column independently + array([[-1. , 1. , 1. ], + [-0.5, 0. , 0.5]]) + >>> maxabs_scale(X, axis=1) # scale each row independently + array([[-1. , 0.5, 1. ], + [-1. , 0. , 1. ]]) + """ + # Unlike the scaler object, this function allows 1d input. + + # If copy is required, it will be done inside the scaler object. + X = check_array( + X, + accept_sparse=("csr", "csc"), + copy=False, + ensure_2d=False, + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + ) + original_ndim = X.ndim + + if original_ndim == 1: + X = X.reshape(X.shape[0], 1) + + s = MaxAbsScaler(copy=copy) + if axis == 0: + X = s.fit_transform(X) + else: + X = s.fit_transform(X.T).T + + if original_ndim == 1: + X = X.ravel() + + return X + + +class RobustScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): + """Scale features using statistics that are robust to outliers. + + This Scaler removes the median and scales the data according to + the quantile range (defaults to IQR: Interquartile Range). + The IQR is the range between the 1st quartile (25th quantile) + and the 3rd quartile (75th quantile). + + Centering and scaling happen independently on each feature by + computing the relevant statistics on the samples in the training + set. Median and interquartile range are then stored to be used on + later data using the :meth:`transform` method. + + Standardization of a dataset is a common preprocessing for many machine + learning estimators. Typically this is done by removing the mean and + scaling to unit variance. However, outliers can often influence the sample + mean / variance in a negative way. In such cases, using the median and the + interquartile range often give better results. For an example visualization + and comparison to other scalers, refer to :ref:`Compare RobustScaler with + other scalers `. + + .. versionadded:: 0.17 + + Read more in the :ref:`User Guide `. 
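+
+    A worked sketch of the default behaviour (illustrative, in plain NumPy
+    rather than through the estimator): each feature is shifted by its median
+    and divided by its interquartile range.
+
+    >>> import numpy as np
+    >>> x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
+    >>> q1, q3 = np.percentile(x, [25, 75])
+    >>> (x - np.median(x)) / (q3 - q1)
+    array([-1. , -0.5,  0. ,  0.5,  1. ])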
+ + Parameters + ---------- + with_centering : bool, default=True + If `True`, center the data before scaling. + This will cause :meth:`transform` to raise an exception when attempted + on sparse matrices, because centering them entails building a dense + matrix which in common use cases is likely to be too large to fit in + memory. + + with_scaling : bool, default=True + If `True`, scale the data to interquartile range. + + quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0, \ + default=(25.0, 75.0) + Quantile range used to calculate `scale_`. By default this is equal to + the IQR, i.e., `q_min` is the first quantile and `q_max` is the third + quantile. + + .. versionadded:: 0.18 + + copy : bool, default=True + If `False`, try to avoid a copy and do inplace scaling instead. + This is not guaranteed to always work inplace; e.g. if the data is + not a NumPy array or scipy.sparse CSR matrix, a copy may still be + returned. + + unit_variance : bool, default=False + If `True`, scale data so that normally distributed features have a + variance of 1. In general, if the difference between the x-values of + `q_max` and `q_min` for a standard normal distribution is greater + than 1, the dataset will be scaled down. If less than 1, the dataset + will be scaled up. + + .. versionadded:: 0.24 + + Attributes + ---------- + center_ : array of floats + The median value for each feature in the training set. + + scale_ : array of floats + The (scaled) interquartile range for each feature in the training set. + + .. versionadded:: 0.17 + *scale_* attribute. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + robust_scale : Equivalent function without the estimator API. + sklearn.decomposition.PCA : Further removes the linear correlation across + features with 'whiten=True'. + + Notes + ----- + + https://en.wikipedia.org/wiki/Median + https://en.wikipedia.org/wiki/Interquartile_range + + Examples + -------- + >>> from sklearn.preprocessing import RobustScaler + >>> X = [[ 1., -2., 2.], + ... [ -2., 1., 3.], + ... [ 4., 1., -2.]] + >>> transformer = RobustScaler().fit(X) + >>> transformer + RobustScaler() + >>> transformer.transform(X) + array([[ 0. , -2. , 0. ], + [-1. , 0. , 0.4], + [ 1. , 0. , -1.6]]) + """ + + _parameter_constraints: dict = { + "with_centering": ["boolean"], + "with_scaling": ["boolean"], + "quantile_range": [tuple], + "copy": ["boolean"], + "unit_variance": ["boolean"], + } + + def __init__( + self, + *, + with_centering=True, + with_scaling=True, + quantile_range=(25.0, 75.0), + copy=True, + unit_variance=False, + ): + self.with_centering = with_centering + self.with_scaling = with_scaling + self.quantile_range = quantile_range + self.unit_variance = unit_variance + self.copy = copy + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Compute the median and quantiles to be used for scaling. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to compute the median and quantiles + used for later scaling along the features axis. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Fitted scaler. 
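+
+        A minimal check of the fitted statistics (illustrative):
+
+        >>> from sklearn.preprocessing import RobustScaler
+        >>> scaler = RobustScaler().fit([[1.0], [2.0], [3.0], [4.0], [5.0]])
+        >>> scaler.center_, scaler.scale_
+        (array([3.]), array([2.]))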
+ """ + # at fit, convert sparse matrices to csc for optimized computation of + # the quantiles + X = self._validate_data( + X, + accept_sparse="csc", + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + ) + + q_min, q_max = self.quantile_range + if not 0 <= q_min <= q_max <= 100: + raise ValueError("Invalid quantile range: %s" % str(self.quantile_range)) + + if self.with_centering: + if sparse.issparse(X): + raise ValueError( + "Cannot center sparse matrices: use `with_centering=False`" + " instead. See docstring for motivation and alternatives." + ) + self.center_ = np.nanmedian(X, axis=0) + else: + self.center_ = None + + if self.with_scaling: + quantiles = [] + for feature_idx in range(X.shape[1]): + if sparse.issparse(X): + column_nnz_data = X.data[ + X.indptr[feature_idx] : X.indptr[feature_idx + 1] + ] + column_data = np.zeros(shape=X.shape[0], dtype=X.dtype) + column_data[: len(column_nnz_data)] = column_nnz_data + else: + column_data = X[:, feature_idx] + + quantiles.append(np.nanpercentile(column_data, self.quantile_range)) + + quantiles = np.transpose(quantiles) + + self.scale_ = quantiles[1] - quantiles[0] + self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False) + if self.unit_variance: + adjust = stats.norm.ppf(q_max / 100.0) - stats.norm.ppf(q_min / 100.0) + self.scale_ = self.scale_ / adjust + else: + self.scale_ = None + + return self + + def transform(self, X): + """Center and scale the data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to scale along the specified axis. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + Transformed array. + """ + check_is_fitted(self) + X = self._validate_data( + X, + accept_sparse=("csr", "csc"), + copy=self.copy, + dtype=FLOAT_DTYPES, + reset=False, + force_all_finite="allow-nan", + ) + + if sparse.issparse(X): + if self.with_scaling: + inplace_column_scale(X, 1.0 / self.scale_) + else: + if self.with_centering: + X -= self.center_ + if self.with_scaling: + X /= self.scale_ + return X + + def inverse_transform(self, X): + """Scale back the data to the original representation. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The rescaled data to be transformed back. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + Transformed array. + """ + check_is_fitted(self) + X = check_array( + X, + accept_sparse=("csr", "csc"), + copy=self.copy, + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + ) + + if sparse.issparse(X): + if self.with_scaling: + inplace_column_scale(X, self.scale_) + else: + if self.with_scaling: + X *= self.scale_ + if self.with_centering: + X += self.center_ + return X + + def _more_tags(self): + return {"allow_nan": True} + + +@validate_params( + {"X": ["array-like", "sparse matrix"], "axis": [Options(Integral, {0, 1})]}, + prefer_skip_nested_validation=False, +) +def robust_scale( + X, + *, + axis=0, + with_centering=True, + with_scaling=True, + quantile_range=(25.0, 75.0), + copy=True, + unit_variance=False, +): + """Standardize a dataset along any axis. + + Center to the median and component wise scale + according to the interquartile range. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_sample, n_features) + The data to center and scale. + + axis : int, default=0 + Axis used to compute the medians and IQR along. 
If 0, + independently scale each feature, otherwise (if 1) scale + each sample. + + with_centering : bool, default=True + If `True`, center the data before scaling. + + with_scaling : bool, default=True + If `True`, scale the data to unit variance (or equivalently, + unit standard deviation). + + quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0,\ + default=(25.0, 75.0) + Quantile range used to calculate `scale_`. By default this is equal to + the IQR, i.e., `q_min` is the first quantile and `q_max` is the third + quantile. + + .. versionadded:: 0.18 + + copy : bool, default=True + If False, try to avoid a copy and scale in place. + This is not guaranteed to always work in place; e.g. if the data is + a numpy array with an int dtype, a copy will be returned even with + copy=False. + + unit_variance : bool, default=False + If `True`, scale data so that normally distributed features have a + variance of 1. In general, if the difference between the x-values of + `q_max` and `q_min` for a standard normal distribution is greater + than 1, the dataset will be scaled down. If less than 1, the dataset + will be scaled up. + + .. versionadded:: 0.24 + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + The transformed data. + + See Also + -------- + RobustScaler : Performs centering and scaling using the Transformer API + (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). + + Notes + ----- + This implementation will refuse to center scipy.sparse matrices + since it would make them non-sparse and would potentially crash the + program with memory exhaustion problems. + + Instead the caller is expected to either set explicitly + `with_centering=False` (in that case, only variance scaling will be + performed on the features of the CSR matrix) or to call `X.toarray()` + if he/she expects the materialized dense array to fit in memory. + + To avoid memory copy the caller should pass a CSR matrix. + + For a comparison of the different scalers, transformers, and normalizers, + see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. + + .. warning:: Risk of data leak + + Do not use :func:`~sklearn.preprocessing.robust_scale` unless you know + what you are doing. A common mistake is to apply it to the entire data + *before* splitting into training and test sets. This will bias the + model evaluation because information would have leaked from the test + set to the training set. + In general, we recommend using + :class:`~sklearn.preprocessing.RobustScaler` within a + :ref:`Pipeline ` in order to prevent most risks of data + leaking: `pipe = make_pipeline(RobustScaler(), LogisticRegression())`. + + Examples + -------- + >>> from sklearn.preprocessing import robust_scale + >>> X = [[-2, 1, 2], [-1, 0, 1]] + >>> robust_scale(X, axis=0) # scale each column independently + array([[-1., 1., 1.], + [ 1., -1., -1.]]) + >>> robust_scale(X, axis=1) # scale each row independently + array([[-1.5, 0. , 0.5], + [-1. , 0. , 1. 
]]) + """ + X = check_array( + X, + accept_sparse=("csr", "csc"), + copy=False, + ensure_2d=False, + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + ) + original_ndim = X.ndim + + if original_ndim == 1: + X = X.reshape(X.shape[0], 1) + + s = RobustScaler( + with_centering=with_centering, + with_scaling=with_scaling, + quantile_range=quantile_range, + unit_variance=unit_variance, + copy=copy, + ) + if axis == 0: + X = s.fit_transform(X) + else: + X = s.fit_transform(X.T).T + + if original_ndim == 1: + X = X.ravel() + + return X + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "norm": [StrOptions({"l1", "l2", "max"})], + "axis": [Options(Integral, {0, 1})], + "copy": ["boolean"], + "return_norm": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def normalize(X, norm="l2", *, axis=1, copy=True, return_norm=False): + """Scale input vectors individually to unit norm (vector length). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to normalize, element by element. + scipy.sparse matrices should be in CSR format to avoid an + un-necessary copy. + + norm : {'l1', 'l2', 'max'}, default='l2' + The norm to use to normalize each non zero sample (or each non-zero + feature if axis is 0). + + axis : {0, 1}, default=1 + Define axis used to normalize the data along. If 1, independently + normalize each sample, otherwise (if 0) normalize each feature. + + copy : bool, default=True + If False, try to avoid a copy and normalize in place. + This is not guaranteed to always work in place; e.g. if the data is + a numpy array with an int dtype, a copy will be returned even with + copy=False. + + return_norm : bool, default=False + Whether to return the computed norms. + + Returns + ------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Normalized input X. + + norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, ) + An array of norms along given axis for X. + When X is sparse, a NotImplementedError will be raised + for norm 'l1' or 'l2'. + + See Also + -------- + Normalizer : Performs normalization using the Transformer API + (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). + + Notes + ----- + For a comparison of the different scalers, transformers, and normalizers, + see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. + + Examples + -------- + >>> from sklearn.preprocessing import normalize + >>> X = [[-2, 1, 2], [-1, 0, 1]] + >>> normalize(X, norm="l1") # L1 normalization each row independently + array([[-0.4, 0.2, 0.4], + [-0.5, 0. , 0.5]]) + >>> normalize(X, norm="l2") # L2 normalization each row independently + array([[-0.66..., 0.33..., 0.66...], + [-0.70..., 0. 
, 0.70...]]) + """ + if axis == 0: + sparse_format = "csc" + else: # axis == 1: + sparse_format = "csr" + + xp, _ = get_namespace(X) + + X = check_array( + X, + accept_sparse=sparse_format, + copy=copy, + estimator="the normalize function", + dtype=_array_api.supported_float_dtypes(xp), + ) + if axis == 0: + X = X.T + + if sparse.issparse(X): + if return_norm and norm in ("l1", "l2"): + raise NotImplementedError( + "return_norm=True is not implemented " + "for sparse matrices with norm 'l1' " + "or norm 'l2'" + ) + if norm == "l1": + inplace_csr_row_normalize_l1(X) + elif norm == "l2": + inplace_csr_row_normalize_l2(X) + elif norm == "max": + mins, maxes = min_max_axis(X, 1) + norms = np.maximum(abs(mins), maxes) + norms_elementwise = norms.repeat(np.diff(X.indptr)) + mask = norms_elementwise != 0 + X.data[mask] /= norms_elementwise[mask] + else: + if norm == "l1": + norms = xp.sum(xp.abs(X), axis=1) + elif norm == "l2": + norms = row_norms(X) + elif norm == "max": + norms = xp.max(xp.abs(X), axis=1) + norms = _handle_zeros_in_scale(norms, copy=False) + X /= norms[:, None] + + if axis == 0: + X = X.T + + if return_norm: + return X, norms + else: + return X + + +class Normalizer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): + """Normalize samples individually to unit norm. + + Each sample (i.e. each row of the data matrix) with at least one + non zero component is rescaled independently of other samples so + that its norm (l1, l2 or inf) equals one. + + This transformer is able to work both with dense numpy arrays and + scipy.sparse matrix (use CSR format if you want to avoid the burden of + a copy / conversion). + + Scaling inputs to unit norms is a common operation for text + classification or clustering for instance. For instance the dot + product of two l2-normalized TF-IDF vectors is the cosine similarity + of the vectors and is the base similarity metric for the Vector + Space Model commonly used by the Information Retrieval community. + + For an example visualization, refer to :ref:`Compare Normalizer with other + scalers `. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + norm : {'l1', 'l2', 'max'}, default='l2' + The norm to use to normalize each non zero sample. If norm='max' + is used, values will be rescaled by the maximum of the absolute + values. + + copy : bool, default=True + Set to False to perform inplace row normalization and avoid a + copy (if the input is already a numpy array or a scipy.sparse + CSR matrix). + + Attributes + ---------- + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + normalize : Equivalent function without the estimator API. + + Notes + ----- + This estimator is :term:`stateless` and does not need to be fitted. + However, we recommend to call :meth:`fit_transform` instead of + :meth:`transform`, as parameter validation is only performed in + :meth:`fit`. + + Examples + -------- + >>> from sklearn.preprocessing import Normalizer + >>> X = [[4, 1, 2, 2], + ... [1, 3, 9, 3], + ... [5, 7, 5, 1]] + >>> transformer = Normalizer().fit(X) # fit does nothing. 
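+ >>> # Normalizer is stateless: fit only validates parameters, no statistics are stored.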
+ >>> transformer + Normalizer() + >>> transformer.transform(X) + array([[0.8, 0.2, 0.4, 0.4], + [0.1, 0.3, 0.9, 0.3], + [0.5, 0.7, 0.5, 0.1]]) + """ + + _parameter_constraints: dict = { + "norm": [StrOptions({"l1", "l2", "max"})], + "copy": ["boolean"], + } + + def __init__(self, norm="l2", *, copy=True): + self.norm = norm + self.copy = copy + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Only validates estimator's parameters. + + This method allows to: (i) validate the estimator's parameters and + (ii) be consistent with the scikit-learn transformer API. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to estimate the normalization parameters. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Fitted transformer. + """ + self._validate_data(X, accept_sparse="csr") + return self + + def transform(self, X, copy=None): + """Scale each non zero row of X to unit norm. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to normalize, row by row. scipy.sparse matrices should be + in CSR format to avoid an un-necessary copy. + + copy : bool, default=None + Copy the input X or not. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + Transformed array. + """ + copy = copy if copy is not None else self.copy + X = self._validate_data(X, accept_sparse="csr", reset=False) + return normalize(X, norm=self.norm, axis=1, copy=copy) + + def _more_tags(self): + return {"stateless": True, "array_api_support": True} + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "threshold": [Interval(Real, None, None, closed="neither")], + "copy": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def binarize(X, *, threshold=0.0, copy=True): + """Boolean thresholding of array-like or scipy.sparse matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to binarize, element by element. + scipy.sparse matrices should be in CSR or CSC format to avoid an + un-necessary copy. + + threshold : float, default=0.0 + Feature values below or equal to this are replaced by 0, above it by 1. + Threshold may not be less than 0 for operations on sparse matrices. + + copy : bool, default=True + If False, try to avoid a copy and binarize in place. + This is not guaranteed to always work in place; e.g. if the data is + a numpy array with an object dtype, a copy will be returned even with + copy=False. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + The transformed data. + + See Also + -------- + Binarizer : Performs binarization using the Transformer API + (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). 
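+ + Notes + ----- + For sparse input only the stored (non-zero) values are thresholded; a negative ``threshold`` is rejected since the implicit zeros (which compare greater than a negative threshold) cannot be updated in place.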
+ + Examples + -------- + >>> from sklearn.preprocessing import binarize + >>> X = [[0.4, 0.6, 0.5], [0.6, 0.1, 0.2]] + >>> binarize(X, threshold=0.5) + array([[0., 1., 0.], + [1., 0., 0.]]) + """ + X = check_array(X, accept_sparse=["csr", "csc"], copy=copy) + if sparse.issparse(X): + if threshold < 0: + raise ValueError("Cannot binarize a sparse matrix with threshold < 0") + cond = X.data > threshold + not_cond = np.logical_not(cond) + X.data[cond] = 1 + X.data[not_cond] = 0 + X.eliminate_zeros() + else: + cond = X > threshold + not_cond = np.logical_not(cond) + X[cond] = 1 + X[not_cond] = 0 + return X + + +class Binarizer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): + """Binarize data (set feature values to 0 or 1) according to a threshold. + + Values greater than the threshold map to 1, while values less than + or equal to the threshold map to 0. With the default threshold of 0, + only positive values map to 1. + + Binarization is a common operation on text count data where the + analyst can decide to only consider the presence or absence of a + feature rather than a quantified number of occurrences for instance. + + It can also be used as a pre-processing step for estimators that + consider boolean random variables (e.g. modelled using the Bernoulli + distribution in a Bayesian setting). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + threshold : float, default=0.0 + Feature values below or equal to this are replaced by 0, above it by 1. + Threshold may not be less than 0 for operations on sparse matrices. + + copy : bool, default=True + Set to False to perform inplace binarization and avoid a copy (if + the input is already a numpy array or a scipy.sparse CSR matrix). + + Attributes + ---------- + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + binarize : Equivalent function without the estimator API. + KBinsDiscretizer : Bin continuous data into intervals. + OneHotEncoder : Encode categorical features as a one-hot numeric array. + + Notes + ----- + If the input is a sparse matrix, only the non-zero values are subject + to update by the :class:`Binarizer` class. + + This estimator is :term:`stateless` and does not need to be fitted. + However, we recommend to call :meth:`fit_transform` instead of + :meth:`transform`, as parameter validation is only performed in + :meth:`fit`. + + Examples + -------- + >>> from sklearn.preprocessing import Binarizer + >>> X = [[ 1., -1., 2.], + ... [ 2., 0., 0.], + ... [ 0., 1., -1.]] + >>> transformer = Binarizer().fit(X) # fit does nothing. + >>> transformer + Binarizer() + >>> transformer.transform(X) + array([[1., 0., 1.], + [1., 0., 0.], + [0., 1., 0.]]) + """ + + _parameter_constraints: dict = { + "threshold": [Real], + "copy": ["boolean"], + } + + def __init__(self, *, threshold=0.0, copy=True): + self.threshold = threshold + self.copy = copy + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Only validates estimator's parameters. + + This method allows to: (i) validate the estimator's parameters and + (ii) be consistent with the scikit-learn transformer API. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data. + + y : None + Ignored. 
+ + Returns + ------- + self : object + Fitted transformer. + """ + self._validate_data(X, accept_sparse="csr") + return self + + def transform(self, X, copy=None): + """Binarize each element of X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to binarize, element by element. + scipy.sparse matrices should be in CSR format to avoid an + un-necessary copy. + + copy : bool + Copy the input X or not. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + Transformed array. + """ + copy = copy if copy is not None else self.copy + # TODO: This should be refactored because binarize also calls + # check_array + X = self._validate_data(X, accept_sparse=["csr", "csc"], copy=copy, reset=False) + return binarize(X, threshold=self.threshold, copy=False) + + def _more_tags(self): + return {"stateless": True} + + +class KernelCenterer(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + r"""Center an arbitrary kernel matrix :math:`K`. + + Let define a kernel :math:`K` such that: + + .. math:: + K(X, Y) = \phi(X) . \phi(Y)^{T} + + :math:`\phi(X)` is a function mapping of rows of :math:`X` to a + Hilbert space and :math:`K` is of shape `(n_samples, n_samples)`. + + This class allows to compute :math:`\tilde{K}(X, Y)` such that: + + .. math:: + \tilde{K(X, Y)} = \tilde{\phi}(X) . \tilde{\phi}(Y)^{T} + + :math:`\tilde{\phi}(X)` is the centered mapped data in the Hilbert + space. + + `KernelCenterer` centers the features without explicitly computing the + mapping :math:`\phi(\cdot)`. Working with centered kernels is sometime + expected when dealing with algebra computation such as eigendecomposition + for :class:`~sklearn.decomposition.KernelPCA` for instance. + + Read more in the :ref:`User Guide `. + + Attributes + ---------- + K_fit_rows_ : ndarray of shape (n_samples,) + Average of each column of kernel matrix. + + K_fit_all_ : float + Average of kernel matrix. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.kernel_approximation.Nystroem : Approximate a kernel map + using a subset of the training data. + + References + ---------- + .. [1] `Schölkopf, Bernhard, Alexander Smola, and Klaus-Robert Müller. + "Nonlinear component analysis as a kernel eigenvalue problem." + Neural computation 10.5 (1998): 1299-1319. + `_ + + Examples + -------- + >>> from sklearn.preprocessing import KernelCenterer + >>> from sklearn.metrics.pairwise import pairwise_kernels + >>> X = [[ 1., -2., 2.], + ... [ -2., 1., 3.], + ... [ 4., 1., -2.]] + >>> K = pairwise_kernels(X, metric='linear') + >>> K + array([[ 9., 2., -2.], + [ 2., 14., -13.], + [ -2., -13., 21.]]) + >>> transformer = KernelCenterer().fit(K) + >>> transformer + KernelCenterer() + >>> transformer.transform(K) + array([[ 5., 0., -5.], + [ 0., 14., -14.], + [ -5., -14., 19.]]) + """ + + def __init__(self): + # Needed for backported inspect.signature compatibility with PyPy + pass + + def fit(self, K, y=None): + """Fit KernelCenterer. + + Parameters + ---------- + K : ndarray of shape (n_samples, n_samples) + Kernel matrix. + + y : None + Ignored. + + Returns + ------- + self : object + Returns the instance itself. 
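+ + Notes + ----- + Fitting only stores the column means of ``K`` in ``K_fit_rows_`` and the overall mean in ``K_fit_all_``; both are reused by :meth:`transform`.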
+ """ + xp, _ = get_namespace(K) + + K = self._validate_data(K, dtype=_array_api.supported_float_dtypes(xp)) + + if K.shape[0] != K.shape[1]: + raise ValueError( + "Kernel matrix must be a square matrix." + " Input is a {}x{} matrix.".format(K.shape[0], K.shape[1]) + ) + + n_samples = K.shape[0] + self.K_fit_rows_ = xp.sum(K, axis=0) / n_samples + self.K_fit_all_ = xp.sum(self.K_fit_rows_) / n_samples + return self + + def transform(self, K, copy=True): + """Center kernel matrix. + + Parameters + ---------- + K : ndarray of shape (n_samples1, n_samples2) + Kernel matrix. + + copy : bool, default=True + Set to False to perform inplace computation. + + Returns + ------- + K_new : ndarray of shape (n_samples1, n_samples2) + Returns the instance itself. + """ + check_is_fitted(self) + + xp, _ = get_namespace(K) + + K = self._validate_data( + K, copy=copy, dtype=_array_api.supported_float_dtypes(xp), reset=False + ) + + K_pred_cols = (xp.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, None] + + K -= self.K_fit_rows_ + K -= K_pred_cols + K += self.K_fit_all_ + + return K + + @property + def _n_features_out(self): + """Number of transformed output features.""" + # Used by ClassNamePrefixFeaturesOutMixin. This model preserves the + # number of input features but this is not a one-to-one mapping in the + # usual sense. Hence the choice not to use OneToOneFeatureMixin to + # implement get_feature_names_out for this class. + return self.n_features_in_ + + def _more_tags(self): + return {"pairwise": True, "array_api_support": True} + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "value": [Interval(Real, None, None, closed="neither")], + }, + prefer_skip_nested_validation=True, +) +def add_dummy_feature(X, value=1.0): + """Augment dataset with an additional dummy feature. + + This is useful for fitting an intercept term with implementations which + cannot otherwise fit it directly. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data. + + value : float + Value to use for the dummy feature. + + Returns + ------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features + 1) + Same data with dummy feature added as first column. + + Examples + -------- + >>> from sklearn.preprocessing import add_dummy_feature + >>> add_dummy_feature([[0, 1], [1, 0]]) + array([[1., 0., 1.], + [1., 1., 0.]]) + """ + X = check_array(X, accept_sparse=["csc", "csr", "coo"], dtype=FLOAT_DTYPES) + n_samples, n_features = X.shape + shape = (n_samples, n_features + 1) + if sparse.issparse(X): + if X.format == "coo": + # Shift columns to the right. + col = X.col + 1 + # Column indices of dummy feature are 0 everywhere. + col = np.concatenate((np.zeros(n_samples), col)) + # Row indices of dummy feature are 0, ..., n_samples-1. + row = np.concatenate((np.arange(n_samples), X.row)) + # Prepend the dummy feature n_samples times. + data = np.concatenate((np.full(n_samples, value), X.data)) + return sparse.coo_matrix((data, (row, col)), shape) + elif X.format == "csc": + # Shift index pointers since we need to add n_samples elements. + indptr = X.indptr + n_samples + # indptr[0] must be 0. + indptr = np.concatenate((np.array([0]), indptr)) + # Row indices of dummy feature are 0, ..., n_samples-1. + indices = np.concatenate((np.arange(n_samples), X.indices)) + # Prepend the dummy feature n_samples times. 
+ data = np.concatenate((np.full(n_samples, value), X.data)) + return sparse.csc_matrix((data, indices, indptr), shape) + else: + klass = X.__class__ + return klass(add_dummy_feature(X.tocoo(), value)) + else: + return np.hstack((np.full((n_samples, 1), value), X)) + + +class QuantileTransformer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): + """Transform features using quantiles information. + + This method transforms the features to follow a uniform or a normal + distribution. Therefore, for a given feature, this transformation tends + to spread out the most frequent values. It also reduces the impact of + (marginal) outliers: this is therefore a robust preprocessing scheme. + + The transformation is applied on each feature independently. First an + estimate of the cumulative distribution function of a feature is + used to map the original values to a uniform distribution. The obtained + values are then mapped to the desired output distribution using the + associated quantile function. Features values of new/unseen data that fall + below or above the fitted range will be mapped to the bounds of the output + distribution. Note that this transform is non-linear. It may distort linear + correlations between variables measured at the same scale but renders + variables measured at different scales more directly comparable. + + For example visualizations, refer to :ref:`Compare QuantileTransformer with + other scalers `. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.19 + + Parameters + ---------- + n_quantiles : int, default=1000 or n_samples + Number of quantiles to be computed. It corresponds to the number + of landmarks used to discretize the cumulative distribution function. + If n_quantiles is larger than the number of samples, n_quantiles is set + to the number of samples as a larger number of quantiles does not give + a better approximation of the cumulative distribution function + estimator. + + output_distribution : {'uniform', 'normal'}, default='uniform' + Marginal distribution for the transformed data. The choices are + 'uniform' (default) or 'normal'. + + ignore_implicit_zeros : bool, default=False + Only applies to sparse matrices. If True, the sparse entries of the + matrix are discarded to compute the quantile statistics. If False, + these entries are treated as zeros. + + subsample : int, default=10_000 + Maximum number of samples used to estimate the quantiles for + computational efficiency. Note that the subsampling procedure may + differ for value-identical sparse and dense matrices. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for subsampling and smoothing + noise. + Please see ``subsample`` for more details. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + copy : bool, default=True + Set to False to perform inplace transformation and avoid a copy (if the + input is already a numpy array). + + Attributes + ---------- + n_quantiles_ : int + The actual number of quantiles used to discretize the cumulative + distribution function. + + quantiles_ : ndarray of shape (n_quantiles, n_features) + The values corresponding the quantiles of reference. + + references_ : ndarray of shape (n_quantiles, ) + Quantiles of references. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. 
Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + quantile_transform : Equivalent function without the estimator API. + PowerTransformer : Perform mapping to a normal distribution using a power + transform. + StandardScaler : Perform standardization that is faster, but less robust + to outliers. + RobustScaler : Perform robust standardization that removes the influence + of outliers but does not put outliers and inliers on the same scale. + + Notes + ----- + NaNs are treated as missing values: disregarded in fit, and maintained in + transform. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.preprocessing import QuantileTransformer + >>> rng = np.random.RandomState(0) + >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0) + >>> qt = QuantileTransformer(n_quantiles=10, random_state=0) + >>> qt.fit_transform(X) + array([...]) + """ + + _parameter_constraints: dict = { + "n_quantiles": [Interval(Integral, 1, None, closed="left")], + "output_distribution": [StrOptions({"uniform", "normal"})], + "ignore_implicit_zeros": ["boolean"], + "subsample": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + "copy": ["boolean"], + } + + def __init__( + self, + *, + n_quantiles=1000, + output_distribution="uniform", + ignore_implicit_zeros=False, + subsample=10_000, + random_state=None, + copy=True, + ): + self.n_quantiles = n_quantiles + self.output_distribution = output_distribution + self.ignore_implicit_zeros = ignore_implicit_zeros + self.subsample = subsample + self.random_state = random_state + self.copy = copy + + def _dense_fit(self, X, random_state): + """Compute percentiles for dense matrices. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The data used to scale along the features axis. + """ + if self.ignore_implicit_zeros: + warnings.warn( + "'ignore_implicit_zeros' takes effect only with" + " sparse matrix. This parameter has no effect." + ) + + n_samples, n_features = X.shape + references = self.references_ * 100 + + self.quantiles_ = [] + for col in X.T: + if self.subsample < n_samples: + subsample_idx = random_state.choice( + n_samples, size=self.subsample, replace=False + ) + col = col.take(subsample_idx, mode="clip") + self.quantiles_.append(np.nanpercentile(col, references)) + self.quantiles_ = np.transpose(self.quantiles_) + # Due to floating-point precision error in `np.nanpercentile`, + # make sure that quantiles are monotonically increasing. + # Upstream issue in numpy: + # https://github.com/numpy/numpy/issues/14685 + self.quantiles_ = np.maximum.accumulate(self.quantiles_) + + def _sparse_fit(self, X, random_state): + """Compute percentiles for sparse matrices. + + Parameters + ---------- + X : sparse matrix of shape (n_samples, n_features) + The data used to scale along the features axis. The sparse matrix + needs to be nonnegative. If a sparse matrix is provided, + it will be converted into a sparse ``csc_matrix``. 
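+ + When subsampling, the number of non-zero entries drawn per column is proportional to the column's density, so the implicit zeros keep roughly the same share in the subsample as in the full column.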
+ """ + n_samples, n_features = X.shape + references = self.references_ * 100 + + self.quantiles_ = [] + for feature_idx in range(n_features): + column_nnz_data = X.data[X.indptr[feature_idx] : X.indptr[feature_idx + 1]] + if len(column_nnz_data) > self.subsample: + column_subsample = self.subsample * len(column_nnz_data) // n_samples + if self.ignore_implicit_zeros: + column_data = np.zeros(shape=column_subsample, dtype=X.dtype) + else: + column_data = np.zeros(shape=self.subsample, dtype=X.dtype) + column_data[:column_subsample] = random_state.choice( + column_nnz_data, size=column_subsample, replace=False + ) + else: + if self.ignore_implicit_zeros: + column_data = np.zeros(shape=len(column_nnz_data), dtype=X.dtype) + else: + column_data = np.zeros(shape=n_samples, dtype=X.dtype) + column_data[: len(column_nnz_data)] = column_nnz_data + + if not column_data.size: + # if no nnz, an error will be raised for computing the + # quantiles. Force the quantiles to be zeros. + self.quantiles_.append([0] * len(references)) + else: + self.quantiles_.append(np.nanpercentile(column_data, references)) + self.quantiles_ = np.transpose(self.quantiles_) + # due to floating-point precision error in `np.nanpercentile`, + # make sure the quantiles are monotonically increasing + # Upstream issue in numpy: + # https://github.com/numpy/numpy/issues/14685 + self.quantiles_ = np.maximum.accumulate(self.quantiles_) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Compute the quantiles used for transforming. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to scale along the features axis. If a sparse + matrix is provided, it will be converted into a sparse + ``csc_matrix``. Additionally, the sparse matrix needs to be + nonnegative if `ignore_implicit_zeros` is False. + + y : None + Ignored. + + Returns + ------- + self : object + Fitted transformer. + """ + if self.n_quantiles > self.subsample: + raise ValueError( + "The number of quantiles cannot be greater than" + " the number of samples used. Got {} quantiles" + " and {} samples.".format(self.n_quantiles, self.subsample) + ) + + X = self._check_inputs(X, in_fit=True, copy=False) + n_samples = X.shape[0] + + if self.n_quantiles > n_samples: + warnings.warn( + "n_quantiles (%s) is greater than the total number " + "of samples (%s). n_quantiles is set to " + "n_samples." 
% (self.n_quantiles, n_samples) + ) + self.n_quantiles_ = max(1, min(self.n_quantiles, n_samples)) + + rng = check_random_state(self.random_state) + + # Create the quantiles of reference + self.references_ = np.linspace(0, 1, self.n_quantiles_, endpoint=True) + if sparse.issparse(X): + self._sparse_fit(X, rng) + else: + self._dense_fit(X, rng) + + return self + + def _transform_col(self, X_col, quantiles, inverse): + """Private function to transform a single feature.""" + + output_distribution = self.output_distribution + + if not inverse: + lower_bound_x = quantiles[0] + upper_bound_x = quantiles[-1] + lower_bound_y = 0 + upper_bound_y = 1 + else: + lower_bound_x = 0 + upper_bound_x = 1 + lower_bound_y = quantiles[0] + upper_bound_y = quantiles[-1] + # for inverse transform, match a uniform distribution + with np.errstate(invalid="ignore"): # hide NaN comparison warnings + if output_distribution == "normal": + X_col = stats.norm.cdf(X_col) + # else output distribution is already a uniform distribution + + # find index for lower and higher bounds + with np.errstate(invalid="ignore"): # hide NaN comparison warnings + if output_distribution == "normal": + lower_bounds_idx = X_col - BOUNDS_THRESHOLD < lower_bound_x + upper_bounds_idx = X_col + BOUNDS_THRESHOLD > upper_bound_x + if output_distribution == "uniform": + lower_bounds_idx = X_col == lower_bound_x + upper_bounds_idx = X_col == upper_bound_x + + isfinite_mask = ~np.isnan(X_col) + X_col_finite = X_col[isfinite_mask] + if not inverse: + # Interpolate in one direction and in the other and take the + # mean. This is in case of repeated values in the features + # and hence repeated quantiles + # + # If we don't do this, only one extreme of the duplicated is + # used (the upper when we do ascending, and the + # lower for descending). We take the mean of these two + X_col[isfinite_mask] = 0.5 * ( + np.interp(X_col_finite, quantiles, self.references_) + - np.interp(-X_col_finite, -quantiles[::-1], -self.references_[::-1]) + ) + else: + X_col[isfinite_mask] = np.interp(X_col_finite, self.references_, quantiles) + + X_col[upper_bounds_idx] = upper_bound_y + X_col[lower_bounds_idx] = lower_bound_y + # for forward transform, match the output distribution + if not inverse: + with np.errstate(invalid="ignore"): # hide NaN comparison warnings + if output_distribution == "normal": + X_col = stats.norm.ppf(X_col) + # find the value to clip the data to avoid mapping to + # infinity. Clip such that the inverse transform will be + # consistent + clip_min = stats.norm.ppf(BOUNDS_THRESHOLD - np.spacing(1)) + clip_max = stats.norm.ppf(1 - (BOUNDS_THRESHOLD - np.spacing(1))) + X_col = np.clip(X_col, clip_min, clip_max) + # else output distribution is uniform and the ppf is the + # identity function so we let X_col unchanged + + return X_col + + def _check_inputs(self, X, in_fit, accept_sparse_negative=False, copy=False): + """Check inputs before fit and transform.""" + X = self._validate_data( + X, + reset=in_fit, + accept_sparse="csc", + copy=copy, + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + ) + # we only accept positive sparse matrix when ignore_implicit_zeros is + # false and that we call fit or transform. + with np.errstate(invalid="ignore"): # hide NaN comparison warnings + if ( + not accept_sparse_negative + and not self.ignore_implicit_zeros + and (sparse.issparse(X) and np.any(X.data < 0)) + ): + raise ValueError( + "QuantileTransformer only accepts non-negative sparse matrices." 
+ ) + + return X + + def _transform(self, X, inverse=False): + """Forward and inverse transform. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The data used to scale along the features axis. + + inverse : bool, default=False + If False, apply forward transform. If True, apply + inverse transform. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + Projected data. + """ + if sparse.issparse(X): + for feature_idx in range(X.shape[1]): + column_slice = slice(X.indptr[feature_idx], X.indptr[feature_idx + 1]) + X.data[column_slice] = self._transform_col( + X.data[column_slice], self.quantiles_[:, feature_idx], inverse + ) + else: + for feature_idx in range(X.shape[1]): + X[:, feature_idx] = self._transform_col( + X[:, feature_idx], self.quantiles_[:, feature_idx], inverse + ) + + return X + + def transform(self, X): + """Feature-wise transformation of the data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to scale along the features axis. If a sparse + matrix is provided, it will be converted into a sparse + ``csc_matrix``. Additionally, the sparse matrix needs to be + nonnegative if `ignore_implicit_zeros` is False. + + Returns + ------- + Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) + The projected data. + """ + check_is_fitted(self) + X = self._check_inputs(X, in_fit=False, copy=self.copy) + + return self._transform(X, inverse=False) + + def inverse_transform(self, X): + """Back-projection to the original space. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to scale along the features axis. If a sparse + matrix is provided, it will be converted into a sparse + ``csc_matrix``. Additionally, the sparse matrix needs to be + nonnegative if `ignore_implicit_zeros` is False. + + Returns + ------- + Xt : {ndarray, sparse matrix} of (n_samples, n_features) + The projected data. + """ + check_is_fitted(self) + X = self._check_inputs( + X, in_fit=False, accept_sparse_negative=True, copy=self.copy + ) + + return self._transform(X, inverse=True) + + def _more_tags(self): + return {"allow_nan": True} + + +@validate_params( + {"X": ["array-like", "sparse matrix"], "axis": [Options(Integral, {0, 1})]}, + prefer_skip_nested_validation=False, +) +def quantile_transform( + X, + *, + axis=0, + n_quantiles=1000, + output_distribution="uniform", + ignore_implicit_zeros=False, + subsample=int(1e5), + random_state=None, + copy=True, +): + """Transform features using quantiles information. + + This method transforms the features to follow a uniform or a normal + distribution. Therefore, for a given feature, this transformation tends + to spread out the most frequent values. It also reduces the impact of + (marginal) outliers: this is therefore a robust preprocessing scheme. + + The transformation is applied on each feature independently. First an + estimate of the cumulative distribution function of a feature is + used to map the original values to a uniform distribution. The obtained + values are then mapped to the desired output distribution using the + associated quantile function. Features values of new/unseen data that fall + below or above the fitted range will be mapped to the bounds of the output + distribution. Note that this transform is non-linear. It may distort linear + correlations between variables measured at the same scale but renders + variables measured at different scales more directly comparable. 
+ + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to transform. + + axis : int, default=0 + Axis used to compute the means and standard deviations along. If 0, + transform each feature, otherwise (if 1) transform each sample. + + n_quantiles : int, default=1000 or n_samples + Number of quantiles to be computed. It corresponds to the number + of landmarks used to discretize the cumulative distribution function. + If n_quantiles is larger than the number of samples, n_quantiles is set + to the number of samples as a larger number of quantiles does not give + a better approximation of the cumulative distribution function + estimator. + + output_distribution : {'uniform', 'normal'}, default='uniform' + Marginal distribution for the transformed data. The choices are + 'uniform' (default) or 'normal'. + + ignore_implicit_zeros : bool, default=False + Only applies to sparse matrices. If True, the sparse entries of the + matrix are discarded to compute the quantile statistics. If False, + these entries are treated as zeros. + + subsample : int, default=1e5 + Maximum number of samples used to estimate the quantiles for + computational efficiency. Note that the subsampling procedure may + differ for value-identical sparse and dense matrices. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for subsampling and smoothing + noise. + Please see ``subsample`` for more details. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + copy : bool, default=True + If False, try to avoid a copy and transform in place. + This is not guaranteed to always work in place; e.g. if the data is + a numpy array with an int dtype, a copy will be returned even with + copy=False. + + .. versionchanged:: 0.23 + The default value of `copy` changed from False to True in 0.23. + + Returns + ------- + Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) + The transformed data. + + See Also + -------- + QuantileTransformer : Performs quantile-based scaling using the + Transformer API (e.g. as part of a preprocessing + :class:`~sklearn.pipeline.Pipeline`). + power_transform : Maps data to a normal distribution using a + power transformation. + scale : Performs standardization that is faster, but less robust + to outliers. + robust_scale : Performs robust standardization that removes the influence + of outliers but does not put outliers and inliers on the same scale. + + Notes + ----- + NaNs are treated as missing values: disregarded in fit, and maintained in + transform. + + .. warning:: Risk of data leak + + Do not use :func:`~sklearn.preprocessing.quantile_transform` unless + you know what you are doing. A common mistake is to apply it + to the entire data *before* splitting into training and + test sets. This will bias the model evaluation because + information would have leaked from the test set to the + training set. + In general, we recommend using + :class:`~sklearn.preprocessing.QuantileTransformer` within a + :ref:`Pipeline ` in order to prevent most risks of data + leaking:`pipe = make_pipeline(QuantileTransformer(), + LogisticRegression())`. + + For a comparison of the different scalers, transformers, and normalizers, + see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. 
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.preprocessing import quantile_transform + >>> rng = np.random.RandomState(0) + >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0) + >>> quantile_transform(X, n_quantiles=10, random_state=0, copy=True) + array([...]) + """ + n = QuantileTransformer( + n_quantiles=n_quantiles, + output_distribution=output_distribution, + subsample=subsample, + ignore_implicit_zeros=ignore_implicit_zeros, + random_state=random_state, + copy=copy, + ) + if axis == 0: + X = n.fit_transform(X) + else: # axis == 1 + X = n.fit_transform(X.T).T + return X + + +class PowerTransformer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): + """Apply a power transform featurewise to make data more Gaussian-like. + + Power transforms are a family of parametric, monotonic transformations + that are applied to make data more Gaussian-like. This is useful for + modeling issues related to heteroscedasticity (non-constant variance), + or other situations where normality is desired. + + Currently, PowerTransformer supports the Box-Cox transform and the + Yeo-Johnson transform. The optimal parameter for stabilizing variance and + minimizing skewness is estimated through maximum likelihood. + + Box-Cox requires input data to be strictly positive, while Yeo-Johnson + supports both positive or negative data. + + By default, zero-mean, unit-variance normalization is applied to the + transformed data. + + For an example visualization, refer to :ref:`Compare PowerTransformer with + other scalers `. To see the + effect of Box-Cox and Yeo-Johnson transformations on different + distributions, see: + :ref:`sphx_glr_auto_examples_preprocessing_plot_map_data_to_normal.py`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + Parameters + ---------- + method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson' + The power transform method. Available methods are: + + - 'yeo-johnson' [1]_, works with positive and negative values + - 'box-cox' [2]_, only works with strictly positive values + + standardize : bool, default=True + Set to True to apply zero-mean, unit-variance normalization to the + transformed output. + + copy : bool, default=True + Set to False to perform inplace computation during transformation. + + Attributes + ---------- + lambdas_ : ndarray of float of shape (n_features,) + The parameters of the power transformation for the selected features. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + power_transform : Equivalent function without the estimator API. + + QuantileTransformer : Maps data to a standard normal distribution with + the parameter `output_distribution='normal'`. + + Notes + ----- + NaNs are treated as missing values: disregarded in ``fit``, and maintained + in ``transform``. + + References + ---------- + + .. [1] :doi:`I.K. Yeo and R.A. Johnson, "A new family of power + transformations to improve normality or symmetry." Biometrika, + 87(4), pp.954-959, (2000). <10.1093/biomet/87.4.954>` + + .. [2] :doi:`G.E.P. Box and D.R. Cox, "An Analysis of Transformations", + Journal of the Royal Statistical Society B, 26, 211-252 (1964). 
+ <10.1111/j.2517-6161.1964.tb00553.x>` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.preprocessing import PowerTransformer + >>> pt = PowerTransformer() + >>> data = [[1, 2], [3, 2], [4, 5]] + >>> print(pt.fit(data)) + PowerTransformer() + >>> print(pt.lambdas_) + [ 1.386... -3.100...] + >>> print(pt.transform(data)) + [[-1.316... -0.707...] + [ 0.209... -0.707...] + [ 1.106... 1.414...]] + """ + + _parameter_constraints: dict = { + "method": [StrOptions({"yeo-johnson", "box-cox"})], + "standardize": ["boolean"], + "copy": ["boolean"], + } + + def __init__(self, method="yeo-johnson", *, standardize=True, copy=True): + self.method = method + self.standardize = standardize + self.copy = copy + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Estimate the optimal parameter lambda for each feature. + + The optimal lambda parameter for minimizing skewness is estimated on + each feature independently using maximum likelihood. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data used to estimate the optimal transformation parameters. + + y : None + Ignored. + + Returns + ------- + self : object + Fitted transformer. + """ + self._fit(X, y=y, force_transform=False) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Fit `PowerTransformer` to `X`, then transform `X`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data used to estimate the optimal transformation parameters + and to be transformed using a power transformation. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_features) + Transformed data. + """ + return self._fit(X, y, force_transform=True) + + def _fit(self, X, y=None, force_transform=False): + X = self._check_input(X, in_fit=True, check_positive=True) + + if not self.copy and not force_transform: # if call from fit() + X = X.copy() # force copy so that fit does not change X inplace + + n_samples = X.shape[0] + mean = np.mean(X, axis=0, dtype=np.float64) + var = np.var(X, axis=0, dtype=np.float64) + + optim_function = { + "box-cox": self._box_cox_optimize, + "yeo-johnson": self._yeo_johnson_optimize, + }[self.method] + + transform_function = { + "box-cox": boxcox, + "yeo-johnson": self._yeo_johnson_transform, + }[self.method] + + with np.errstate(invalid="ignore"): # hide NaN warnings + self.lambdas_ = np.empty(X.shape[1], dtype=X.dtype) + for i, col in enumerate(X.T): + # For yeo-johnson, leave constant features unchanged + # lambda=1 corresponds to the identity transformation + is_constant_feature = _is_constant_feature(var[i], mean[i], n_samples) + if self.method == "yeo-johnson" and is_constant_feature: + self.lambdas_[i] = 1.0 + continue + + self.lambdas_[i] = optim_function(col) + + if self.standardize or force_transform: + X[:, i] = transform_function(X[:, i], self.lambdas_[i]) + + if self.standardize: + self._scaler = StandardScaler(copy=False).set_output(transform="default") + if force_transform: + X = self._scaler.fit_transform(X) + else: + self._scaler.fit(X) + + return X + + def transform(self, X): + """Apply the power transform to each feature using the fitted lambdas. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to be transformed using a power transformation. + + Returns + ------- + X_trans : ndarray of shape (n_samples, n_features) + The transformed data. 
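+ + Notes + ----- + The fitted ``lambdas_`` are applied feature-wise; when ``standardize=True`` the result is additionally passed through the internally fitted :class:`StandardScaler`.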
+ """ + check_is_fitted(self) + X = self._check_input(X, in_fit=False, check_positive=True, check_shape=True) + + transform_function = { + "box-cox": boxcox, + "yeo-johnson": self._yeo_johnson_transform, + }[self.method] + for i, lmbda in enumerate(self.lambdas_): + with np.errstate(invalid="ignore"): # hide NaN warnings + X[:, i] = transform_function(X[:, i], lmbda) + + if self.standardize: + X = self._scaler.transform(X) + + return X + + def inverse_transform(self, X): + """Apply the inverse power transformation using the fitted lambdas. + + The inverse of the Box-Cox transformation is given by:: + + if lambda_ == 0: + X = exp(X_trans) + else: + X = (X_trans * lambda_ + 1) ** (1 / lambda_) + + The inverse of the Yeo-Johnson transformation is given by:: + + if X >= 0 and lambda_ == 0: + X = exp(X_trans) - 1 + elif X >= 0 and lambda_ != 0: + X = (X_trans * lambda_ + 1) ** (1 / lambda_) - 1 + elif X < 0 and lambda_ != 2: + X = 1 - (-(2 - lambda_) * X_trans + 1) ** (1 / (2 - lambda_)) + elif X < 0 and lambda_ == 2: + X = 1 - exp(-X_trans) + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The transformed data. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The original data. + """ + check_is_fitted(self) + X = self._check_input(X, in_fit=False, check_shape=True) + + if self.standardize: + X = self._scaler.inverse_transform(X) + + inv_fun = { + "box-cox": self._box_cox_inverse_tranform, + "yeo-johnson": self._yeo_johnson_inverse_transform, + }[self.method] + for i, lmbda in enumerate(self.lambdas_): + with np.errstate(invalid="ignore"): # hide NaN warnings + X[:, i] = inv_fun(X[:, i], lmbda) + + return X + + def _box_cox_inverse_tranform(self, x, lmbda): + """Return inverse-transformed input x following Box-Cox inverse + transform with parameter lambda. + """ + if lmbda == 0: + x_inv = np.exp(x) + else: + x_inv = (x * lmbda + 1) ** (1 / lmbda) + + return x_inv + + def _yeo_johnson_inverse_transform(self, x, lmbda): + """Return inverse-transformed input x following Yeo-Johnson inverse + transform with parameter lambda. + """ + x_inv = np.zeros_like(x) + pos = x >= 0 + + # when x >= 0 + if abs(lmbda) < np.spacing(1.0): + x_inv[pos] = np.exp(x[pos]) - 1 + else: # lmbda != 0 + x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1 + + # when x < 0 + if abs(lmbda - 2) > np.spacing(1.0): + x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1, 1 / (2 - lmbda)) + else: # lmbda == 2 + x_inv[~pos] = 1 - np.exp(-x[~pos]) + + return x_inv + + def _yeo_johnson_transform(self, x, lmbda): + """Return transformed input x following Yeo-Johnson transform with + parameter lambda. + """ + + out = np.zeros_like(x) + pos = x >= 0 # binary mask + + # when x >= 0 + if abs(lmbda) < np.spacing(1.0): + out[pos] = np.log1p(x[pos]) + else: # lmbda != 0 + out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda + + # when x < 0 + if abs(lmbda - 2) > np.spacing(1.0): + out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda) + else: # lmbda == 2 + out[~pos] = -np.log1p(-x[~pos]) + + return out + + def _box_cox_optimize(self, x): + """Find and return optimal lambda parameter of the Box-Cox transform by + MLE, for observed data x. + + We here use scipy builtins which uses the brent optimizer. 
+ """ + mask = np.isnan(x) + if np.all(mask): + raise ValueError("Column must not be all nan.") + + # the computation of lambda is influenced by NaNs so we need to + # get rid of them + _, lmbda = stats.boxcox(x[~mask], lmbda=None) + + return lmbda + + def _yeo_johnson_optimize(self, x): + """Find and return optimal lambda parameter of the Yeo-Johnson + transform by MLE, for observed data x. + + Like for Box-Cox, MLE is done via the brent optimizer. + """ + x_tiny = np.finfo(np.float64).tiny + + def _neg_log_likelihood(lmbda): + """Return the negative log likelihood of the observed data x as a + function of lambda.""" + x_trans = self._yeo_johnson_transform(x, lmbda) + n_samples = x.shape[0] + x_trans_var = x_trans.var() + + # Reject transformed data that would raise a RuntimeWarning in np.log + if x_trans_var < x_tiny: + return np.inf + + log_var = np.log(x_trans_var) + loglike = -n_samples / 2 * log_var + loglike += (lmbda - 1) * (np.sign(x) * np.log1p(np.abs(x))).sum() + + return -loglike + + # the computation of lambda is influenced by NaNs so we need to + # get rid of them + x = x[~np.isnan(x)] + # choosing bracket -2, 2 like for boxcox + return optimize.brent(_neg_log_likelihood, brack=(-2, 2)) + + def _check_input(self, X, in_fit, check_positive=False, check_shape=False): + """Validate the input before fit and transform. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + in_fit : bool + Whether or not `_check_input` is called from `fit` or other + methods, e.g. `predict`, `transform`, etc. + + check_positive : bool, default=False + If True, check that all data is positive and non-zero (only if + ``self.method=='box-cox'``). + + check_shape : bool, default=False + If True, check that n_features matches the length of self.lambdas_ + """ + X = self._validate_data( + X, + ensure_2d=True, + dtype=FLOAT_DTYPES, + copy=self.copy, + force_all_finite="allow-nan", + reset=in_fit, + ) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered") + if check_positive and self.method == "box-cox" and np.nanmin(X) <= 0: + raise ValueError( + "The Box-Cox transformation can only be " + "applied to strictly positive data" + ) + + if check_shape and not X.shape[1] == len(self.lambdas_): + raise ValueError( + "Input data has a different number of features " + "than fitting data. Should have {n}, data has {m}".format( + n=len(self.lambdas_), m=X.shape[1] + ) + ) + + return X + + def _more_tags(self): + return {"allow_nan": True} + + +@validate_params( + {"X": ["array-like"]}, + prefer_skip_nested_validation=False, +) +def power_transform(X, method="yeo-johnson", *, standardize=True, copy=True): + """Parametric, monotonic transformation to make data more Gaussian-like. + + Power transforms are a family of parametric, monotonic transformations + that are applied to make data more Gaussian-like. This is useful for + modeling issues related to heteroscedasticity (non-constant variance), + or other situations where normality is desired. + + Currently, power_transform supports the Box-Cox transform and the + Yeo-Johnson transform. The optimal parameter for stabilizing variance and + minimizing skewness is estimated through maximum likelihood. + + Box-Cox requires input data to be strictly positive, while Yeo-Johnson + supports both positive or negative data. + + By default, zero-mean, unit-variance normalization is applied to the + transformed data. + + Read more in the :ref:`User Guide `. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to be transformed using a power transformation. + + method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson' + The power transform method. Available methods are: + + - 'yeo-johnson' [1]_, works with positive and negative values + - 'box-cox' [2]_, only works with strictly positive values + + .. versionchanged:: 0.23 + The default value of the `method` parameter changed from + 'box-cox' to 'yeo-johnson' in 0.23. + + standardize : bool, default=True + Set to True to apply zero-mean, unit-variance normalization to the + transformed output. + + copy : bool, default=True + If False, try to avoid a copy and transform in place. + This is not guaranteed to always work in place; e.g. if the data is + a numpy array with an int dtype, a copy will be returned even with + copy=False. + + Returns + ------- + X_trans : ndarray of shape (n_samples, n_features) + The transformed data. + + See Also + -------- + PowerTransformer : Equivalent transformation with the + Transformer API (e.g. as part of a preprocessing + :class:`~sklearn.pipeline.Pipeline`). + + quantile_transform : Maps data to a standard normal distribution with + the parameter `output_distribution='normal'`. + + Notes + ----- + NaNs are treated as missing values: disregarded in ``fit``, and maintained + in ``transform``. + + For a comparison of the different scalers, transformers, and normalizers, + see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. + + References + ---------- + + .. [1] I.K. Yeo and R.A. Johnson, "A new family of power transformations to + improve normality or symmetry." Biometrika, 87(4), pp.954-959, + (2000). + + .. [2] G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal + of the Royal Statistical Society B, 26, 211-252 (1964). + + Examples + -------- + >>> import numpy as np + >>> from sklearn.preprocessing import power_transform + >>> data = [[1, 2], [3, 2], [4, 5]] + >>> print(power_transform(data, method='box-cox')) + [[-1.332... -0.707...] + [ 0.256... -0.707...] + [ 1.076... 1.414...]] + + .. warning:: Risk of data leak. + Do not use :func:`~sklearn.preprocessing.power_transform` unless you + know what you are doing. A common mistake is to apply it to the entire + data *before* splitting into training and test sets. This will bias the + model evaluation because information would have leaked from the test + set to the training set. + In general, we recommend using + :class:`~sklearn.preprocessing.PowerTransformer` within a + :ref:`Pipeline ` in order to prevent most risks of data + leaking, e.g.: `pipe = make_pipeline(PowerTransformer(), + LogisticRegression())`. 
+ """ + pt = PowerTransformer(method=method, standardize=standardize, copy=copy) + return pt.fit_transform(X) diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/_discretization.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_discretization.py new file mode 100644 index 0000000000000000000000000000000000000000..033bdd960d2b215bde276e17637858b62e98ffa0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_discretization.py @@ -0,0 +1,472 @@ +# Author: Henry Lin +# Tom Dupré la Tour + +# License: BSD + + +import warnings +from numbers import Integral + +import numpy as np + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils import _safe_indexing +from ..utils._param_validation import Hidden, Interval, Options, StrOptions +from ..utils.stats import _weighted_percentile +from ..utils.validation import ( + _check_feature_names_in, + _check_sample_weight, + check_array, + check_is_fitted, + check_random_state, +) +from ._encoders import OneHotEncoder + + +class KBinsDiscretizer(TransformerMixin, BaseEstimator): + """ + Bin continuous data into intervals. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + Parameters + ---------- + n_bins : int or array-like of shape (n_features,), default=5 + The number of bins to produce. Raises ValueError if ``n_bins < 2``. + + encode : {'onehot', 'onehot-dense', 'ordinal'}, default='onehot' + Method used to encode the transformed result. + + - 'onehot': Encode the transformed result with one-hot encoding + and return a sparse matrix. Ignored features are always + stacked to the right. + - 'onehot-dense': Encode the transformed result with one-hot encoding + and return a dense array. Ignored features are always + stacked to the right. + - 'ordinal': Return the bin identifier encoded as an integer value. + + strategy : {'uniform', 'quantile', 'kmeans'}, default='quantile' + Strategy used to define the widths of the bins. + + - 'uniform': All bins in each feature have identical widths. + - 'quantile': All bins in each feature have the same number of points. + - 'kmeans': Values in each bin have the same nearest center of a 1D + k-means cluster. + + For an example of the different strategies see: + :ref:`sphx_glr_auto_examples_preprocessing_plot_discretization_strategies.py`. + + dtype : {np.float32, np.float64}, default=None + The desired data-type for the output. If None, output dtype is + consistent with input dtype. Only np.float32 and np.float64 are + supported. + + .. versionadded:: 0.24 + + subsample : int or None, default='warn' + Maximum number of samples, used to fit the model, for computational + efficiency. Defaults to 200_000 when `strategy='quantile'` and to `None` + when `strategy='uniform'` or `strategy='kmeans'`. + `subsample=None` means that all the training samples are used when + computing the quantiles that determine the binning thresholds. + Since quantile computation relies on sorting each column of `X` and + that sorting has an `n log(n)` time complexity, + it is recommended to use subsampling on datasets with a + very large number of samples. + + .. versionchanged:: 1.3 + The default value of `subsample` changed from `None` to `200_000` when + `strategy="quantile"`. + + .. versionchanged:: 1.5 + The default value of `subsample` changed from `None` to `200_000` when + `strategy="uniform"` or `strategy="kmeans"`. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for subsampling. 
+ Pass an int for reproducible results across multiple function calls. + See the `subsample` parameter for more details. + See :term:`Glossary `. + + .. versionadded:: 1.1 + + Attributes + ---------- + bin_edges_ : ndarray of ndarray of shape (n_features,) + The edges of each bin. Contain arrays of varying shapes ``(n_bins_, )`` + Ignored features will have empty arrays. + + n_bins_ : ndarray of shape (n_features,), dtype=np.int64 + Number of bins per feature. Bins whose width are too small + (i.e., <= 1e-8) are removed with a warning. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + Binarizer : Class used to bin values as ``0`` or + ``1`` based on a parameter ``threshold``. + + Notes + ----- + + For a visualization of discretization on different datasets refer to + :ref:`sphx_glr_auto_examples_preprocessing_plot_discretization_classification.py`. + On the effect of discretization on linear models see: + :ref:`sphx_glr_auto_examples_preprocessing_plot_discretization.py`. + + In bin edges for feature ``i``, the first and last values are used only for + ``inverse_transform``. During transform, bin edges are extended to:: + + np.concatenate([-np.inf, bin_edges_[i][1:-1], np.inf]) + + You can combine ``KBinsDiscretizer`` with + :class:`~sklearn.compose.ColumnTransformer` if you only want to preprocess + part of the features. + + ``KBinsDiscretizer`` might produce constant features (e.g., when + ``encode = 'onehot'`` and certain bins do not contain any data). + These features can be removed with feature selection algorithms + (e.g., :class:`~sklearn.feature_selection.VarianceThreshold`). + + Examples + -------- + >>> from sklearn.preprocessing import KBinsDiscretizer + >>> X = [[-2, 1, -4, -1], + ... [-1, 2, -3, -0.5], + ... [ 0, 3, -2, 0.5], + ... [ 1, 4, -1, 2]] + >>> est = KBinsDiscretizer( + ... n_bins=3, encode='ordinal', strategy='uniform', subsample=None + ... ) + >>> est.fit(X) + KBinsDiscretizer(...) + >>> Xt = est.transform(X) + >>> Xt # doctest: +SKIP + array([[ 0., 0., 0., 0.], + [ 1., 1., 1., 0.], + [ 2., 2., 2., 1.], + [ 2., 2., 2., 2.]]) + + Sometimes it may be useful to convert the data back into the original + feature space. The ``inverse_transform`` function converts the binned + data into the original feature space. Each value will be equal to the mean + of the two bin edges. 
+ + >>> est.bin_edges_[0] + array([-2., -1., 0., 1.]) + >>> est.inverse_transform(Xt) + array([[-1.5, 1.5, -3.5, -0.5], + [-0.5, 2.5, -2.5, -0.5], + [ 0.5, 3.5, -1.5, 0.5], + [ 0.5, 3.5, -1.5, 1.5]]) + """ + + _parameter_constraints: dict = { + "n_bins": [Interval(Integral, 2, None, closed="left"), "array-like"], + "encode": [StrOptions({"onehot", "onehot-dense", "ordinal"})], + "strategy": [StrOptions({"uniform", "quantile", "kmeans"})], + "dtype": [Options(type, {np.float64, np.float32}), None], + "subsample": [ + Interval(Integral, 1, None, closed="left"), + None, + Hidden(StrOptions({"warn"})), + ], + "random_state": ["random_state"], + } + + def __init__( + self, + n_bins=5, + *, + encode="onehot", + strategy="quantile", + dtype=None, + subsample="warn", + random_state=None, + ): + self.n_bins = n_bins + self.encode = encode + self.strategy = strategy + self.dtype = dtype + self.subsample = subsample + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None, sample_weight=None): + """ + Fit the estimator. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data to be discretized. + + y : None + Ignored. This parameter exists only for compatibility with + :class:`~sklearn.pipeline.Pipeline`. + + sample_weight : ndarray of shape (n_samples,) + Contains weight values to be associated with each sample. + Only possible when `strategy` is set to `"quantile"`. + + .. versionadded:: 1.3 + + Returns + ------- + self : object + Returns the instance itself. + """ + X = self._validate_data(X, dtype="numeric") + + if self.dtype in (np.float64, np.float32): + output_dtype = self.dtype + else: # self.dtype is None + output_dtype = X.dtype + + n_samples, n_features = X.shape + + if sample_weight is not None and self.strategy == "uniform": + raise ValueError( + "`sample_weight` was provided but it cannot be " + "used with strategy='uniform'. Got strategy=" + f"{self.strategy!r} instead." + ) + + if self.strategy in ("uniform", "kmeans") and self.subsample == "warn": + warnings.warn( + ( + "In version 1.5 onwards, subsample=200_000 " + "will be used by default. Set subsample explicitly to " + "silence this warning in the mean time. Set " + "subsample=None to disable subsampling explicitly." + ), + FutureWarning, + ) + + subsample = self.subsample + if subsample == "warn": + subsample = 200000 if self.strategy == "quantile" else None + if subsample is not None and n_samples > subsample: + rng = check_random_state(self.random_state) + subsample_idx = rng.choice(n_samples, size=subsample, replace=False) + X = _safe_indexing(X, subsample_idx) + + n_features = X.shape[1] + n_bins = self._validate_n_bins(n_features) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + bin_edges = np.zeros(n_features, dtype=object) + for jj in range(n_features): + column = X[:, jj] + col_min, col_max = column.min(), column.max() + + if col_min == col_max: + warnings.warn( + "Feature %d is constant and will be replaced with 0." 
% jj + ) + n_bins[jj] = 1 + bin_edges[jj] = np.array([-np.inf, np.inf]) + continue + + if self.strategy == "uniform": + bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1) + + elif self.strategy == "quantile": + quantiles = np.linspace(0, 100, n_bins[jj] + 1) + if sample_weight is None: + bin_edges[jj] = np.asarray(np.percentile(column, quantiles)) + else: + bin_edges[jj] = np.asarray( + [ + _weighted_percentile(column, sample_weight, q) + for q in quantiles + ], + dtype=np.float64, + ) + elif self.strategy == "kmeans": + from ..cluster import KMeans # fixes import loops + + # Deterministic initialization with uniform spacing + uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1) + init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5 + + # 1D k-means procedure + km = KMeans(n_clusters=n_bins[jj], init=init, n_init=1) + centers = km.fit( + column[:, None], sample_weight=sample_weight + ).cluster_centers_[:, 0] + # Must sort, centers may be unsorted even with sorted init + centers.sort() + bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5 + bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max] + + # Remove bins whose width are too small (i.e., <= 1e-8) + if self.strategy in ("quantile", "kmeans"): + mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8 + bin_edges[jj] = bin_edges[jj][mask] + if len(bin_edges[jj]) - 1 != n_bins[jj]: + warnings.warn( + "Bins whose width are too small (i.e., <= " + "1e-8) in feature %d are removed. Consider " + "decreasing the number of bins." % jj + ) + n_bins[jj] = len(bin_edges[jj]) - 1 + + self.bin_edges_ = bin_edges + self.n_bins_ = n_bins + + if "onehot" in self.encode: + self._encoder = OneHotEncoder( + categories=[np.arange(i) for i in self.n_bins_], + sparse_output=self.encode == "onehot", + dtype=output_dtype, + ) + # Fit the OneHotEncoder with toy datasets + # so that it's ready for use after the KBinsDiscretizer is fitted + self._encoder.fit(np.zeros((1, len(self.n_bins_)))) + + return self + + def _validate_n_bins(self, n_features): + """Returns n_bins_, the number of bins per feature.""" + orig_bins = self.n_bins + if isinstance(orig_bins, Integral): + return np.full(n_features, orig_bins, dtype=int) + + n_bins = check_array(orig_bins, dtype=int, copy=True, ensure_2d=False) + + if n_bins.ndim > 1 or n_bins.shape[0] != n_features: + raise ValueError("n_bins must be a scalar or array of shape (n_features,).") + + bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins) + + violating_indices = np.where(bad_nbins_value)[0] + if violating_indices.shape[0] > 0: + indices = ", ".join(str(i) for i in violating_indices) + raise ValueError( + "{} received an invalid number " + "of bins at indices {}. Number of bins " + "must be at least 2, and must be an int.".format( + KBinsDiscretizer.__name__, indices + ) + ) + return n_bins + + def transform(self, X): + """ + Discretize the data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data to be discretized. + + Returns + ------- + Xt : {ndarray, sparse matrix}, dtype={np.float32, np.float64} + Data in the binned space. Will be a sparse matrix if + `self.encode='onehot'` and ndarray otherwise. 
+ """ + check_is_fitted(self) + + # check input and attribute dtypes + dtype = (np.float64, np.float32) if self.dtype is None else self.dtype + Xt = self._validate_data(X, copy=True, dtype=dtype, reset=False) + + bin_edges = self.bin_edges_ + for jj in range(Xt.shape[1]): + Xt[:, jj] = np.searchsorted(bin_edges[jj][1:-1], Xt[:, jj], side="right") + + if self.encode == "ordinal": + return Xt + + dtype_init = None + if "onehot" in self.encode: + dtype_init = self._encoder.dtype + self._encoder.dtype = Xt.dtype + try: + Xt_enc = self._encoder.transform(Xt) + finally: + # revert the initial dtype to avoid modifying self. + self._encoder.dtype = dtype_init + return Xt_enc + + def inverse_transform(self, Xt): + """ + Transform discretized data back to original feature space. + + Note that this function does not regenerate the original data + due to discretization rounding. + + Parameters + ---------- + Xt : array-like of shape (n_samples, n_features) + Transformed data in the binned space. + + Returns + ------- + Xinv : ndarray, dtype={np.float32, np.float64} + Data in the original feature space. + """ + check_is_fitted(self) + + if "onehot" in self.encode: + Xt = self._encoder.inverse_transform(Xt) + + Xinv = check_array(Xt, copy=True, dtype=(np.float64, np.float32)) + n_features = self.n_bins_.shape[0] + if Xinv.shape[1] != n_features: + raise ValueError( + "Incorrect number of features. Expecting {}, received {}.".format( + n_features, Xinv.shape[1] + ) + ) + + for jj in range(n_features): + bin_edges = self.bin_edges_[jj] + bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5 + Xinv[:, jj] = bin_centers[(Xinv[:, jj]).astype(np.int64)] + + return Xinv + + def get_feature_names_out(self, input_features=None): + """Get output feature names. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. 
+ """ + check_is_fitted(self, "n_features_in_") + input_features = _check_feature_names_in(self, input_features) + if hasattr(self, "_encoder"): + return self._encoder.get_feature_names_out(input_features) + + # ordinal encoding + return input_features diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/_encoders.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_encoders.py new file mode 100644 index 0000000000000000000000000000000000000000..3feadc68e8d2dc3d7829363a9ba9f2db1b6d6910 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_encoders.py @@ -0,0 +1,1678 @@ +# Authors: Andreas Mueller +# Joris Van den Bossche +# License: BSD 3 clause + +import numbers +import warnings +from numbers import Integral + +import numpy as np +from scipy import sparse + +from ..base import BaseEstimator, OneToOneFeatureMixin, TransformerMixin, _fit_context +from ..utils import _safe_indexing, check_array, is_scalar_nan +from ..utils._encode import _check_unknown, _encode, _get_counts, _unique +from ..utils._mask import _get_mask +from ..utils._param_validation import Interval, RealNotInt, StrOptions +from ..utils._set_output import _get_output_config +from ..utils.validation import _check_feature_names_in, check_is_fitted + +__all__ = ["OneHotEncoder", "OrdinalEncoder"] + + +class _BaseEncoder(TransformerMixin, BaseEstimator): + """ + Base class for encoders that includes the code to categorize and + transform the input features. + + """ + + def _check_X(self, X, force_all_finite=True): + """ + Perform custom check_array: + - convert list of strings to object dtype + - check for missing values for object dtype data (check_array does + not do that) + - return list of features (arrays): this list of features is + constructed feature by feature to preserve the data types + of pandas DataFrame columns, as otherwise information is lost + and cannot be used, e.g. for the `categories_` attribute. + + """ + if not (hasattr(X, "iloc") and getattr(X, "ndim", 0) == 2): + # if not a dataframe, do normal check_array validation + X_temp = check_array(X, dtype=None, force_all_finite=force_all_finite) + if not hasattr(X, "dtype") and np.issubdtype(X_temp.dtype, np.str_): + X = check_array(X, dtype=object, force_all_finite=force_all_finite) + else: + X = X_temp + needs_validation = False + else: + # pandas dataframe, do validation later column by column, in order + # to keep the dtype information to be used in the encoder. + needs_validation = force_all_finite + + n_samples, n_features = X.shape + X_columns = [] + + for i in range(n_features): + Xi = _safe_indexing(X, indices=i, axis=1) + Xi = check_array( + Xi, ensure_2d=False, dtype=None, force_all_finite=needs_validation + ) + X_columns.append(Xi) + + return X_columns, n_samples, n_features + + def _fit( + self, + X, + handle_unknown="error", + force_all_finite=True, + return_counts=False, + return_and_ignore_missing_for_infrequent=False, + ): + self._check_infrequent_enabled() + self._check_n_features(X, reset=True) + self._check_feature_names(X, reset=True) + X_list, n_samples, n_features = self._check_X( + X, force_all_finite=force_all_finite + ) + self.n_features_in_ = n_features + + if self.categories != "auto": + if len(self.categories) != n_features: + raise ValueError( + "Shape mismatch: if categories is an array," + " it has to be of shape (n_features,)." 
+ ) + + self.categories_ = [] + category_counts = [] + compute_counts = return_counts or self._infrequent_enabled + + for i in range(n_features): + Xi = X_list[i] + + if self.categories == "auto": + result = _unique(Xi, return_counts=compute_counts) + if compute_counts: + cats, counts = result + category_counts.append(counts) + else: + cats = result + else: + if np.issubdtype(Xi.dtype, np.str_): + # Always convert string categories to objects to avoid + # unexpected string truncation for longer category labels + # passed in the constructor. + Xi_dtype = object + else: + Xi_dtype = Xi.dtype + + cats = np.array(self.categories[i], dtype=Xi_dtype) + if ( + cats.dtype == object + and isinstance(cats[0], bytes) + and Xi.dtype.kind != "S" + ): + msg = ( + f"In column {i}, the predefined categories have type 'bytes'" + " which is incompatible with values of type" + f" '{type(Xi[0]).__name__}'." + ) + raise ValueError(msg) + + # `nan` must be the last stated category + for category in cats[:-1]: + if is_scalar_nan(category): + raise ValueError( + "Nan should be the last element in user" + f" provided categories, see categories {cats}" + f" in column #{i}" + ) + + if cats.size != len(_unique(cats)): + msg = ( + f"In column {i}, the predefined categories" + " contain duplicate elements." + ) + raise ValueError(msg) + + if Xi.dtype.kind not in "OUS": + sorted_cats = np.sort(cats) + error_msg = ( + "Unsorted categories are not supported for numerical categories" + ) + # if there are nans, nan should be the last element + stop_idx = -1 if np.isnan(sorted_cats[-1]) else None + if np.any(sorted_cats[:stop_idx] != cats[:stop_idx]): + raise ValueError(error_msg) + + if handle_unknown == "error": + diff = _check_unknown(Xi, cats) + if diff: + msg = ( + "Found unknown categories {0} in column {1}" + " during fit".format(diff, i) + ) + raise ValueError(msg) + if compute_counts: + category_counts.append(_get_counts(Xi, cats)) + + self.categories_.append(cats) + + output = {"n_samples": n_samples} + if return_counts: + output["category_counts"] = category_counts + + missing_indices = {} + if return_and_ignore_missing_for_infrequent: + for feature_idx, categories_for_idx in enumerate(self.categories_): + if is_scalar_nan(categories_for_idx[-1]): + # `nan` values can only be placed in the latest position + missing_indices[feature_idx] = categories_for_idx.size - 1 + output["missing_indices"] = missing_indices + + if self._infrequent_enabled: + self._fit_infrequent_category_mapping( + n_samples, + category_counts, + missing_indices, + ) + return output + + def _transform( + self, + X, + handle_unknown="error", + force_all_finite=True, + warn_on_unknown=False, + ignore_category_indices=None, + ): + X_list, n_samples, n_features = self._check_X( + X, force_all_finite=force_all_finite + ) + self._check_feature_names(X, reset=False) + self._check_n_features(X, reset=False) + + X_int = np.zeros((n_samples, n_features), dtype=int) + X_mask = np.ones((n_samples, n_features), dtype=bool) + + columns_with_unknown = [] + for i in range(n_features): + Xi = X_list[i] + diff, valid_mask = _check_unknown(Xi, self.categories_[i], return_mask=True) + + if not np.all(valid_mask): + if handle_unknown == "error": + msg = ( + "Found unknown categories {0} in column {1}" + " during transform".format(diff, i) + ) + raise ValueError(msg) + else: + if warn_on_unknown: + columns_with_unknown.append(i) + # Set the problematic rows to an acceptable value and + # continue `The rows are marked `X_mask` and will be + # removed later. 
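+ # The rows flagged False in `valid_mask` hold categories not seen during
+ # fit. They are recorded in `X_mask` below and temporarily filled with the
+ # first known category so that `_encode` can run; the encoders later use
+ # `X_mask` to zero out these rows (one-hot) or re-encode them according to
+ # the configured unknown/infrequent handling.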
+ X_mask[:, i] = valid_mask + # cast Xi into the largest string type necessary + # to handle different lengths of numpy strings + if ( + self.categories_[i].dtype.kind in ("U", "S") + and self.categories_[i].itemsize > Xi.itemsize + ): + Xi = Xi.astype(self.categories_[i].dtype) + elif self.categories_[i].dtype.kind == "O" and Xi.dtype.kind == "U": + # categories are objects and Xi are numpy strings. + # Cast Xi to an object dtype to prevent truncation + # when setting invalid values. + Xi = Xi.astype("O") + else: + Xi = Xi.copy() + + Xi[~valid_mask] = self.categories_[i][0] + # We use check_unknown=False, since _check_unknown was + # already called above. + X_int[:, i] = _encode(Xi, uniques=self.categories_[i], check_unknown=False) + if columns_with_unknown: + warnings.warn( + ( + "Found unknown categories in columns " + f"{columns_with_unknown} during transform. These " + "unknown categories will be encoded as all zeros" + ), + UserWarning, + ) + + self._map_infrequent_categories(X_int, X_mask, ignore_category_indices) + return X_int, X_mask + + @property + def infrequent_categories_(self): + """Infrequent categories for each feature.""" + # raises an AttributeError if `_infrequent_indices` is not defined + infrequent_indices = self._infrequent_indices + return [ + None if indices is None else category[indices] + for category, indices in zip(self.categories_, infrequent_indices) + ] + + def _check_infrequent_enabled(self): + """ + This functions checks whether _infrequent_enabled is True or False. + This has to be called after parameter validation in the fit function. + """ + max_categories = getattr(self, "max_categories", None) + min_frequency = getattr(self, "min_frequency", None) + self._infrequent_enabled = ( + max_categories is not None and max_categories >= 1 + ) or min_frequency is not None + + def _identify_infrequent(self, category_count, n_samples, col_idx): + """Compute the infrequent indices. + + Parameters + ---------- + category_count : ndarray of shape (n_cardinality,) + Category counts. + + n_samples : int + Number of samples. + + col_idx : int + Index of the current category. Only used for the error message. + + Returns + ------- + output : ndarray of shape (n_infrequent_categories,) or None + If there are infrequent categories, indices of infrequent + categories. Otherwise None. + """ + if isinstance(self.min_frequency, numbers.Integral): + infrequent_mask = category_count < self.min_frequency + elif isinstance(self.min_frequency, numbers.Real): + min_frequency_abs = n_samples * self.min_frequency + infrequent_mask = category_count < min_frequency_abs + else: + infrequent_mask = np.zeros(category_count.shape[0], dtype=bool) + + n_current_features = category_count.size - infrequent_mask.sum() + 1 + if self.max_categories is not None and self.max_categories < n_current_features: + # max_categories includes the one infrequent category + frequent_category_count = self.max_categories - 1 + if frequent_category_count == 0: + # All categories are infrequent + infrequent_mask[:] = True + else: + # stable sort to preserve original count order + smallest_levels = np.argsort(category_count, kind="mergesort")[ + :-frequent_category_count + ] + infrequent_mask[smallest_levels] = True + + output = np.flatnonzero(infrequent_mask) + return output if output.size > 0 else None + + def _fit_infrequent_category_mapping( + self, n_samples, category_counts, missing_indices + ): + """Fit infrequent categories. + + Defines the private attribute: `_default_to_infrequent_mappings`. 
For + feature `i`, `_default_to_infrequent_mappings[i]` defines the mapping + from the integer encoding returned by `super().transform()` into + infrequent categories. If `_default_to_infrequent_mappings[i]` is None, + there were no infrequent categories in the training set. + + For example if categories 0, 2 and 4 were frequent, while categories + 1, 3, 5 were infrequent for feature 7, then these categories are mapped + to a single output: + `_default_to_infrequent_mappings[7] = array([0, 3, 1, 3, 2, 3])` + + Defines private attribute: `_infrequent_indices`. `_infrequent_indices[i]` + is an array of indices such that + `categories_[i][_infrequent_indices[i]]` are all the infrequent category + labels. If the feature `i` has no infrequent categories + `_infrequent_indices[i]` is None. + + .. versionadded:: 1.1 + + Parameters + ---------- + n_samples : int + Number of samples in training set. + category_counts: list of ndarray + `category_counts[i]` is the category counts corresponding to + `self.categories_[i]`. + missing_indices : dict + Dict mapping from feature_idx to category index with a missing value. + """ + # Remove missing value from counts, so it is not considered as infrequent + if missing_indices: + category_counts_ = [] + for feature_idx, count in enumerate(category_counts): + if feature_idx in missing_indices: + category_counts_.append( + np.delete(count, missing_indices[feature_idx]) + ) + else: + category_counts_.append(count) + else: + category_counts_ = category_counts + + self._infrequent_indices = [ + self._identify_infrequent(category_count, n_samples, col_idx) + for col_idx, category_count in enumerate(category_counts_) + ] + + # compute mapping from default mapping to infrequent mapping + self._default_to_infrequent_mappings = [] + + for feature_idx, infreq_idx in enumerate(self._infrequent_indices): + cats = self.categories_[feature_idx] + # no infrequent categories + if infreq_idx is None: + self._default_to_infrequent_mappings.append(None) + continue + + n_cats = len(cats) + if feature_idx in missing_indices: + # Missing index was removed from this category when computing + # infrequent indices, thus we need to decrease the number of + # total categories when considering the infrequent mapping. + n_cats -= 1 + + # infrequent indices exist + mapping = np.empty(n_cats, dtype=np.int64) + n_infrequent_cats = infreq_idx.size + + # infrequent categories are mapped to the last element. + n_frequent_cats = n_cats - n_infrequent_cats + mapping[infreq_idx] = n_frequent_cats + + frequent_indices = np.setdiff1d(np.arange(n_cats), infreq_idx) + mapping[frequent_indices] = np.arange(n_frequent_cats) + + self._default_to_infrequent_mappings.append(mapping) + + def _map_infrequent_categories(self, X_int, X_mask, ignore_category_indices): + """Map infrequent categories to integer representing the infrequent category. + + This modifies X_int in-place. Values that were invalid based on `X_mask` + are mapped to the infrequent category if there was an infrequent + category for that feature. + + Parameters + ---------- + X_int: ndarray of shape (n_samples, n_features) + Integer encoded categories. + + X_mask: ndarray of shape (n_samples, n_features) + Bool mask for valid values in `X_int`. + + ignore_category_indices : dict + Dictionary mapping from feature_idx to category index to ignore. + Ignored indexes will not be grouped and the original ordinal encoding + will remain. 
+ """ + if not self._infrequent_enabled: + return + + ignore_category_indices = ignore_category_indices or {} + + for col_idx in range(X_int.shape[1]): + infrequent_idx = self._infrequent_indices[col_idx] + if infrequent_idx is None: + continue + + X_int[~X_mask[:, col_idx], col_idx] = infrequent_idx[0] + if self.handle_unknown == "infrequent_if_exist": + # All the unknown values are now mapped to the + # infrequent_idx[0], which makes the unknown values valid + # This is needed in `transform` when the encoding is formed + # using `X_mask`. + X_mask[:, col_idx] = True + + # Remaps encoding in `X_int` where the infrequent categories are + # grouped together. + for i, mapping in enumerate(self._default_to_infrequent_mappings): + if mapping is None: + continue + + if i in ignore_category_indices: + # Update rows that are **not** ignored + rows_to_update = X_int[:, i] != ignore_category_indices[i] + else: + rows_to_update = slice(None) + + X_int[rows_to_update, i] = np.take(mapping, X_int[rows_to_update, i]) + + def _more_tags(self): + return {"X_types": ["2darray", "categorical"], "allow_nan": True} + + +class OneHotEncoder(_BaseEncoder): + """ + Encode categorical features as a one-hot numeric array. + + The input to this transformer should be an array-like of integers or + strings, denoting the values taken on by categorical (discrete) features. + The features are encoded using a one-hot (aka 'one-of-K' or 'dummy') + encoding scheme. This creates a binary column for each category and + returns a sparse matrix or dense array (depending on the ``sparse_output`` + parameter). + + By default, the encoder derives the categories based on the unique values + in each feature. Alternatively, you can also specify the `categories` + manually. + + This encoding is needed for feeding categorical data to many scikit-learn + estimators, notably linear models and SVMs with the standard kernels. + + Note: a one-hot encoding of y labels should use a LabelBinarizer + instead. + + Read more in the :ref:`User Guide `. + For a comparison of different encoders, refer to: + :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder.py`. + + Parameters + ---------- + categories : 'auto' or a list of array-like, default='auto' + Categories (unique values) per feature: + + - 'auto' : Determine categories automatically from the training data. + - list : ``categories[i]`` holds the categories expected in the ith + column. The passed categories should not mix strings and numeric + values within a single feature, and should be sorted in case of + numeric values. + + The used categories can be found in the ``categories_`` attribute. + + .. versionadded:: 0.20 + + drop : {'first', 'if_binary'} or an array-like of shape (n_features,), \ + default=None + Specifies a methodology to use to drop one of the categories per + feature. This is useful in situations where perfectly collinear + features cause problems, such as when feeding the resulting data + into an unregularized linear regression model. + + However, dropping one category breaks the symmetry of the original + representation and can therefore induce a bias in downstream models, + for instance for penalized linear classification or regression models. + + - None : retain all features (the default). + - 'first' : drop the first category in each feature. If only one + category is present, the feature will be dropped entirely. + - 'if_binary' : drop the first category in each feature with two + categories. Features with 1 or more than 2 categories are + left intact. 
+ - array : ``drop[i]`` is the category in feature ``X[:, i]`` that + should be dropped. + + When `max_categories` or `min_frequency` is configured to group + infrequent categories, the dropping behavior is handled after the + grouping. + + .. versionadded:: 0.21 + The parameter `drop` was added in 0.21. + + .. versionchanged:: 0.23 + The option `drop='if_binary'` was added in 0.23. + + .. versionchanged:: 1.1 + Support for dropping infrequent categories. + + sparse_output : bool, default=True + When ``True``, it returns a :class:`scipy.sparse.csr_matrix`, + i.e. a sparse matrix in "Compressed Sparse Row" (CSR) format. + + .. versionadded:: 1.2 + `sparse` was renamed to `sparse_output` + + dtype : number type, default=np.float64 + Desired dtype of output. + + handle_unknown : {'error', 'ignore', 'infrequent_if_exist'}, \ + default='error' + Specifies the way unknown categories are handled during :meth:`transform`. + + - 'error' : Raise an error if an unknown category is present during transform. + - 'ignore' : When an unknown category is encountered during + transform, the resulting one-hot encoded columns for this feature + will be all zeros. In the inverse transform, an unknown category + will be denoted as None. + - 'infrequent_if_exist' : When an unknown category is encountered + during transform, the resulting one-hot encoded columns for this + feature will map to the infrequent category if it exists. The + infrequent category will be mapped to the last position in the + encoding. During inverse transform, an unknown category will be + mapped to the category denoted `'infrequent'` if it exists. If the + `'infrequent'` category does not exist, then :meth:`transform` and + :meth:`inverse_transform` will handle an unknown category as with + `handle_unknown='ignore'`. Infrequent categories exist based on + `min_frequency` and `max_categories`. Read more in the + :ref:`User Guide `. + + .. versionchanged:: 1.1 + `'infrequent_if_exist'` was added to automatically handle unknown + categories and infrequent categories. + + min_frequency : int or float, default=None + Specifies the minimum frequency below which a category will be + considered infrequent. + + - If `int`, categories with a smaller cardinality will be considered + infrequent. + + - If `float`, categories with a smaller cardinality than + `min_frequency * n_samples` will be considered infrequent. + + .. versionadded:: 1.1 + Read more in the :ref:`User Guide `. + + max_categories : int, default=None + Specifies an upper limit to the number of output features for each input + feature when considering infrequent categories. If there are infrequent + categories, `max_categories` includes the category representing the + infrequent categories along with the frequent categories. If `None`, + there is no limit to the number of output features. + + .. versionadded:: 1.1 + Read more in the :ref:`User Guide `. + + feature_name_combiner : "concat" or callable, default="concat" + Callable with signature `def callable(input_feature, category)` that returns a + string. This is used to create feature names to be returned by + :meth:`get_feature_names_out`. + + `"concat"` concatenates encoded feature name and category with + `feature + "_" + str(category)`.E.g. feature X with values 1, 6, 7 create + feature names `X_1, X_6, X_7`. + + .. 
versionadded:: 1.3 + + Attributes + ---------- + categories_ : list of arrays + The categories of each feature determined during fitting + (in order of the features in X and corresponding with the output + of ``transform``). This includes the category specified in ``drop`` + (if any). + + drop_idx_ : array of shape (n_features,) + - ``drop_idx_[i]`` is the index in ``categories_[i]`` of the category + to be dropped for each feature. + - ``drop_idx_[i] = None`` if no category is to be dropped from the + feature with index ``i``, e.g. when `drop='if_binary'` and the + feature isn't binary. + - ``drop_idx_ = None`` if all the transformed features will be + retained. + + If infrequent categories are enabled by setting `min_frequency` or + `max_categories` to a non-default value and `drop_idx[i]` corresponds + to a infrequent category, then the entire infrequent category is + dropped. + + .. versionchanged:: 0.23 + Added the possibility to contain `None` values. + + infrequent_categories_ : list of ndarray + Defined only if infrequent categories are enabled by setting + `min_frequency` or `max_categories` to a non-default value. + `infrequent_categories_[i]` are the infrequent categories for feature + `i`. If the feature `i` has no infrequent categories + `infrequent_categories_[i]` is None. + + .. versionadded:: 1.1 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 1.0 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + feature_name_combiner : callable or None + Callable with signature `def callable(input_feature, category)` that returns a + string. This is used to create feature names to be returned by + :meth:`get_feature_names_out`. + + .. versionadded:: 1.3 + + See Also + -------- + OrdinalEncoder : Performs an ordinal (integer) + encoding of the categorical features. + TargetEncoder : Encodes categorical features using the target. + sklearn.feature_extraction.DictVectorizer : Performs a one-hot encoding of + dictionary items (also handles string-valued features). + sklearn.feature_extraction.FeatureHasher : Performs an approximate one-hot + encoding of dictionary items or strings. + LabelBinarizer : Binarizes labels in a one-vs-all + fashion. + MultiLabelBinarizer : Transforms between iterable of + iterables and a multilabel format, e.g. a (samples x classes) binary + matrix indicating the presence of a class label. + + Examples + -------- + Given a dataset with two features, we let the encoder find the unique + values per feature and transform the data to a binary one-hot encoding. + + >>> from sklearn.preprocessing import OneHotEncoder + + One can discard categories not seen during `fit`: + + >>> enc = OneHotEncoder(handle_unknown='ignore') + >>> X = [['Male', 1], ['Female', 3], ['Female', 2]] + >>> enc.fit(X) + OneHotEncoder(handle_unknown='ignore') + >>> enc.categories_ + [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)] + >>> enc.transform([['Female', 1], ['Male', 4]]).toarray() + array([[1., 0., 1., 0., 0.], + [0., 1., 0., 0., 0.]]) + >>> enc.inverse_transform([[0, 1, 1, 0, 0], [0, 0, 0, 1, 0]]) + array([['Male', 1], + [None, 2]], dtype=object) + >>> enc.get_feature_names_out(['gender', 'group']) + array(['gender_Female', 'gender_Male', 'group_1', 'group_2', 'group_3'], ...) 
+ + One can always drop the first column for each feature: + + >>> drop_enc = OneHotEncoder(drop='first').fit(X) + >>> drop_enc.categories_ + [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)] + >>> drop_enc.transform([['Female', 1], ['Male', 2]]).toarray() + array([[0., 0., 0.], + [1., 1., 0.]]) + + Or drop a column for feature only having 2 categories: + + >>> drop_binary_enc = OneHotEncoder(drop='if_binary').fit(X) + >>> drop_binary_enc.transform([['Female', 1], ['Male', 2]]).toarray() + array([[0., 1., 0., 0.], + [1., 0., 1., 0.]]) + + One can change the way feature names are created. + + >>> def custom_combiner(feature, category): + ... return str(feature) + "_" + type(category).__name__ + "_" + str(category) + >>> custom_fnames_enc = OneHotEncoder(feature_name_combiner=custom_combiner).fit(X) + >>> custom_fnames_enc.get_feature_names_out() + array(['x0_str_Female', 'x0_str_Male', 'x1_int_1', 'x1_int_2', 'x1_int_3'], + dtype=object) + + Infrequent categories are enabled by setting `max_categories` or `min_frequency`. + + >>> import numpy as np + >>> X = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object).T + >>> ohe = OneHotEncoder(max_categories=3, sparse_output=False).fit(X) + >>> ohe.infrequent_categories_ + [array(['a', 'd'], dtype=object)] + >>> ohe.transform([["a"], ["b"]]) + array([[0., 0., 1.], + [1., 0., 0.]]) + """ + + _parameter_constraints: dict = { + "categories": [StrOptions({"auto"}), list], + "drop": [StrOptions({"first", "if_binary"}), "array-like", None], + "dtype": "no_validation", # validation delegated to numpy + "handle_unknown": [StrOptions({"error", "ignore", "infrequent_if_exist"})], + "max_categories": [Interval(Integral, 1, None, closed="left"), None], + "min_frequency": [ + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="neither"), + None, + ], + "sparse_output": ["boolean"], + "feature_name_combiner": [StrOptions({"concat"}), callable], + } + + def __init__( + self, + *, + categories="auto", + drop=None, + sparse_output=True, + dtype=np.float64, + handle_unknown="error", + min_frequency=None, + max_categories=None, + feature_name_combiner="concat", + ): + self.categories = categories + self.sparse_output = sparse_output + self.dtype = dtype + self.handle_unknown = handle_unknown + self.drop = drop + self.min_frequency = min_frequency + self.max_categories = max_categories + self.feature_name_combiner = feature_name_combiner + + def _map_drop_idx_to_infrequent(self, feature_idx, drop_idx): + """Convert `drop_idx` into the index for infrequent categories. + + If there are no infrequent categories, then `drop_idx` is + returned. This method is called in `_set_drop_idx` when the `drop` + parameter is an array-like. + """ + if not self._infrequent_enabled: + return drop_idx + + default_to_infrequent = self._default_to_infrequent_mappings[feature_idx] + if default_to_infrequent is None: + return drop_idx + + # Raise error when explicitly dropping a category that is infrequent + infrequent_indices = self._infrequent_indices[feature_idx] + if infrequent_indices is not None and drop_idx in infrequent_indices: + categories = self.categories_[feature_idx] + raise ValueError( + f"Unable to drop category {categories[drop_idx].item()!r} from" + f" feature {feature_idx} because it is infrequent" + ) + return default_to_infrequent[drop_idx] + + def _set_drop_idx(self): + """Compute the drop indices associated with `self.categories_`. 
+ + If `self.drop` is: + - `None`, No categories have been dropped. + - `'first'`, All zeros to drop the first category. + - `'if_binary'`, All zeros if the category is binary and `None` + otherwise. + - array-like, The indices of the categories that match the + categories in `self.drop`. If the dropped category is an infrequent + category, then the index for the infrequent category is used. This + means that the entire infrequent category is dropped. + + This methods defines a public `drop_idx_` and a private + `_drop_idx_after_grouping`. + + - `drop_idx_`: Public facing API that references the drop category in + `self.categories_`. + - `_drop_idx_after_grouping`: Used internally to drop categories *after* the + infrequent categories are grouped together. + + If there are no infrequent categories or drop is `None`, then + `drop_idx_=_drop_idx_after_grouping`. + """ + if self.drop is None: + drop_idx_after_grouping = None + elif isinstance(self.drop, str): + if self.drop == "first": + drop_idx_after_grouping = np.zeros(len(self.categories_), dtype=object) + elif self.drop == "if_binary": + n_features_out_no_drop = [len(cat) for cat in self.categories_] + if self._infrequent_enabled: + for i, infreq_idx in enumerate(self._infrequent_indices): + if infreq_idx is None: + continue + n_features_out_no_drop[i] -= infreq_idx.size - 1 + + drop_idx_after_grouping = np.array( + [ + 0 if n_features_out == 2 else None + for n_features_out in n_features_out_no_drop + ], + dtype=object, + ) + + else: + drop_array = np.asarray(self.drop, dtype=object) + droplen = len(drop_array) + + if droplen != len(self.categories_): + msg = ( + "`drop` should have length equal to the number " + "of features ({}), got {}" + ) + raise ValueError(msg.format(len(self.categories_), droplen)) + missing_drops = [] + drop_indices = [] + for feature_idx, (drop_val, cat_list) in enumerate( + zip(drop_array, self.categories_) + ): + if not is_scalar_nan(drop_val): + drop_idx = np.where(cat_list == drop_val)[0] + if drop_idx.size: # found drop idx + drop_indices.append( + self._map_drop_idx_to_infrequent(feature_idx, drop_idx[0]) + ) + else: + missing_drops.append((feature_idx, drop_val)) + continue + + # drop_val is nan, find nan in categories manually + if is_scalar_nan(cat_list[-1]): + drop_indices.append( + self._map_drop_idx_to_infrequent(feature_idx, cat_list.size - 1) + ) + else: # nan is missing + missing_drops.append((feature_idx, drop_val)) + + if any(missing_drops): + msg = ( + "The following categories were supposed to be " + "dropped, but were not found in the training " + "data.\n{}".format( + "\n".join( + [ + "Category: {}, Feature: {}".format(c, v) + for c, v in missing_drops + ] + ) + ) + ) + raise ValueError(msg) + drop_idx_after_grouping = np.array(drop_indices, dtype=object) + + # `_drop_idx_after_grouping` are the categories to drop *after* the infrequent + # categories are grouped together. If needed, we remap `drop_idx` back + # to the categories seen in `self.categories_`. 
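+ # For example (illustrative): with six categories and
+ # _default_to_infrequent_mappings[i] == array([0, 3, 1, 3, 2, 3]), a
+ # grouped drop index of 1 maps back to original category index 2 via
+ # np.flatnonzero(mapping == 1)[0], so drop_idx_[i] == 2 while
+ # _drop_idx_after_grouping[i] == 1.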
+ self._drop_idx_after_grouping = drop_idx_after_grouping + + if not self._infrequent_enabled or drop_idx_after_grouping is None: + self.drop_idx_ = self._drop_idx_after_grouping + else: + drop_idx_ = [] + for feature_idx, drop_idx in enumerate(drop_idx_after_grouping): + default_to_infrequent = self._default_to_infrequent_mappings[ + feature_idx + ] + if drop_idx is None or default_to_infrequent is None: + orig_drop_idx = drop_idx + else: + orig_drop_idx = np.flatnonzero(default_to_infrequent == drop_idx)[0] + + drop_idx_.append(orig_drop_idx) + + self.drop_idx_ = np.asarray(drop_idx_, dtype=object) + + def _compute_transformed_categories(self, i, remove_dropped=True): + """Compute the transformed categories used for column `i`. + + 1. If there are infrequent categories, the category is named + 'infrequent_sklearn'. + 2. Dropped columns are removed when remove_dropped=True. + """ + cats = self.categories_[i] + + if self._infrequent_enabled: + infreq_map = self._default_to_infrequent_mappings[i] + if infreq_map is not None: + frequent_mask = infreq_map < infreq_map.max() + infrequent_cat = "infrequent_sklearn" + # infrequent category is always at the end + cats = np.concatenate( + (cats[frequent_mask], np.array([infrequent_cat], dtype=object)) + ) + + if remove_dropped: + cats = self._remove_dropped_categories(cats, i) + return cats + + def _remove_dropped_categories(self, categories, i): + """Remove dropped categories.""" + if ( + self._drop_idx_after_grouping is not None + and self._drop_idx_after_grouping[i] is not None + ): + return np.delete(categories, self._drop_idx_after_grouping[i]) + return categories + + def _compute_n_features_outs(self): + """Compute the n_features_out for each input feature.""" + output = [len(cats) for cats in self.categories_] + + if self._drop_idx_after_grouping is not None: + for i, drop_idx in enumerate(self._drop_idx_after_grouping): + if drop_idx is not None: + output[i] -= 1 + + if not self._infrequent_enabled: + return output + + # infrequent is enabled, the number of features out are reduced + # because the infrequent categories are grouped together + for i, infreq_idx in enumerate(self._infrequent_indices): + if infreq_idx is None: + continue + output[i] -= infreq_idx.size - 1 + + return output + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """ + Fit OneHotEncoder to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to determine the categories of each feature. + + y : None + Ignored. This parameter exists only for compatibility with + :class:`~sklearn.pipeline.Pipeline`. + + Returns + ------- + self + Fitted encoder. + """ + self._fit( + X, + handle_unknown=self.handle_unknown, + force_all_finite="allow-nan", + ) + self._set_drop_idx() + self._n_features_outs = self._compute_n_features_outs() + return self + + def transform(self, X): + """ + Transform X using one-hot encoding. + + If `sparse_output=True` (default), it returns an instance of + :class:`scipy.sparse._csr.csr_matrix` (CSR format). + + If there are infrequent categories for a feature, set by specifying + `max_categories` or `min_frequency`, the infrequent categories are + grouped into a single category. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to encode. + + Returns + ------- + X_out : {ndarray, sparse matrix} of shape \ + (n_samples, n_encoded_features) + Transformed input. If `sparse_output=True`, a sparse matrix will be + returned. 
+ """ + check_is_fitted(self) + transform_output = _get_output_config("transform", estimator=self)["dense"] + if transform_output != "default" and self.sparse_output: + capitalize_transform_output = transform_output.capitalize() + raise ValueError( + f"{capitalize_transform_output} output does not support sparse data." + f" Set sparse_output=False to output {transform_output} dataframes or" + f" disable {capitalize_transform_output} output via" + '` ohe.set_output(transform="default").' + ) + + # validation of X happens in _check_X called by _transform + warn_on_unknown = self.drop is not None and self.handle_unknown in { + "ignore", + "infrequent_if_exist", + } + X_int, X_mask = self._transform( + X, + handle_unknown=self.handle_unknown, + force_all_finite="allow-nan", + warn_on_unknown=warn_on_unknown, + ) + + n_samples, n_features = X_int.shape + + if self._drop_idx_after_grouping is not None: + to_drop = self._drop_idx_after_grouping.copy() + # We remove all the dropped categories from mask, and decrement all + # categories that occur after them to avoid an empty column. + keep_cells = X_int != to_drop + for i, cats in enumerate(self.categories_): + # drop='if_binary' but feature isn't binary + if to_drop[i] is None: + # set to cardinality to not drop from X_int + to_drop[i] = len(cats) + + to_drop = to_drop.reshape(1, -1) + X_int[X_int > to_drop] -= 1 + X_mask &= keep_cells + + mask = X_mask.ravel() + feature_indices = np.cumsum([0] + self._n_features_outs) + indices = (X_int + feature_indices[:-1]).ravel()[mask] + + indptr = np.empty(n_samples + 1, dtype=int) + indptr[0] = 0 + np.sum(X_mask, axis=1, out=indptr[1:], dtype=indptr.dtype) + np.cumsum(indptr[1:], out=indptr[1:]) + data = np.ones(indptr[-1]) + + out = sparse.csr_matrix( + (data, indices, indptr), + shape=(n_samples, feature_indices[-1]), + dtype=self.dtype, + ) + if not self.sparse_output: + return out.toarray() + else: + return out + + def inverse_transform(self, X): + """ + Convert the data back to the original representation. + + When unknown categories are encountered (all zeros in the + one-hot encoding), ``None`` is used to represent this category. If the + feature with the unknown category has a dropped category, the dropped + category will be its inverse. + + For a given input feature, if there is an infrequent category, + 'infrequent_sklearn' will be used to represent the infrequent category. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape \ + (n_samples, n_encoded_features) + The transformed data. + + Returns + ------- + X_tr : ndarray of shape (n_samples, n_features) + Inverse transformed array. + """ + check_is_fitted(self) + X = check_array(X, accept_sparse="csr") + + n_samples, _ = X.shape + n_features = len(self.categories_) + + n_features_out = np.sum(self._n_features_outs) + + # validate shape of passed X + msg = ( + "Shape of the passed X data is not correct. Expected {0} columns, got {1}." 
+ ) + if X.shape[1] != n_features_out: + raise ValueError(msg.format(n_features_out, X.shape[1])) + + transformed_features = [ + self._compute_transformed_categories(i, remove_dropped=False) + for i, _ in enumerate(self.categories_) + ] + + # create resulting array of appropriate dtype + dt = np.result_type(*[cat.dtype for cat in transformed_features]) + X_tr = np.empty((n_samples, n_features), dtype=dt) + + j = 0 + found_unknown = {} + + if self._infrequent_enabled: + infrequent_indices = self._infrequent_indices + else: + infrequent_indices = [None] * n_features + + for i in range(n_features): + cats_wo_dropped = self._remove_dropped_categories( + transformed_features[i], i + ) + n_categories = cats_wo_dropped.shape[0] + + # Only happens if there was a column with a unique + # category. In this case we just fill the column with this + # unique category value. + if n_categories == 0: + X_tr[:, i] = self.categories_[i][self._drop_idx_after_grouping[i]] + j += n_categories + continue + sub = X[:, j : j + n_categories] + # for sparse X argmax returns 2D matrix, ensure 1D array + labels = np.asarray(sub.argmax(axis=1)).flatten() + X_tr[:, i] = cats_wo_dropped[labels] + + if self.handle_unknown == "ignore" or ( + self.handle_unknown == "infrequent_if_exist" + and infrequent_indices[i] is None + ): + unknown = np.asarray(sub.sum(axis=1) == 0).flatten() + # ignored unknown categories: we have a row of all zero + if unknown.any(): + # if categories were dropped then unknown categories will + # be mapped to the dropped category + if ( + self._drop_idx_after_grouping is None + or self._drop_idx_after_grouping[i] is None + ): + found_unknown[i] = unknown + else: + X_tr[unknown, i] = self.categories_[i][ + self._drop_idx_after_grouping[i] + ] + else: + dropped = np.asarray(sub.sum(axis=1) == 0).flatten() + if dropped.any(): + if self._drop_idx_after_grouping is None: + all_zero_samples = np.flatnonzero(dropped) + raise ValueError( + f"Samples {all_zero_samples} can not be inverted " + "when drop=None and handle_unknown='error' " + "because they contain all zeros" + ) + # we can safely assume that all of the nulls in each column + # are the dropped value + drop_idx = self._drop_idx_after_grouping[i] + X_tr[dropped, i] = transformed_features[i][drop_idx] + + j += n_categories + + # if ignored are found: potentially need to upcast result to + # insert None values + if found_unknown: + if X_tr.dtype != object: + X_tr = X_tr.astype(object) + + for idx, mask in found_unknown.items(): + X_tr[mask, idx] = None + + return X_tr + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. 
+ """ + check_is_fitted(self) + input_features = _check_feature_names_in(self, input_features) + cats = [ + self._compute_transformed_categories(i) + for i, _ in enumerate(self.categories_) + ] + + name_combiner = self._check_get_feature_name_combiner() + feature_names = [] + for i in range(len(cats)): + names = [name_combiner(input_features[i], t) for t in cats[i]] + feature_names.extend(names) + + return np.array(feature_names, dtype=object) + + def _check_get_feature_name_combiner(self): + if self.feature_name_combiner == "concat": + return lambda feature, category: feature + "_" + str(category) + else: # callable + dry_run_combiner = self.feature_name_combiner("feature", "category") + if not isinstance(dry_run_combiner, str): + raise TypeError( + "When `feature_name_combiner` is a callable, it should return a " + f"Python string. Got {type(dry_run_combiner)} instead." + ) + return self.feature_name_combiner + + +class OrdinalEncoder(OneToOneFeatureMixin, _BaseEncoder): + """ + Encode categorical features as an integer array. + + The input to this transformer should be an array-like of integers or + strings, denoting the values taken on by categorical (discrete) features. + The features are converted to ordinal integers. This results in + a single column of integers (0 to n_categories - 1) per feature. + + Read more in the :ref:`User Guide `. + For a comparison of different encoders, refer to: + :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder.py`. + + .. versionadded:: 0.20 + + Parameters + ---------- + categories : 'auto' or a list of array-like, default='auto' + Categories (unique values) per feature: + + - 'auto' : Determine categories automatically from the training data. + - list : ``categories[i]`` holds the categories expected in the ith + column. The passed categories should not mix strings and numeric + values, and should be sorted in case of numeric values. + + The used categories can be found in the ``categories_`` attribute. + + dtype : number type, default=np.float64 + Desired dtype of output. + + handle_unknown : {'error', 'use_encoded_value'}, default='error' + When set to 'error' an error will be raised in case an unknown + categorical feature is present during transform. When set to + 'use_encoded_value', the encoded value of unknown categories will be + set to the value given for the parameter `unknown_value`. In + :meth:`inverse_transform`, an unknown category will be denoted as None. + + .. versionadded:: 0.24 + + unknown_value : int or np.nan, default=None + When the parameter handle_unknown is set to 'use_encoded_value', this + parameter is required and will set the encoded value of unknown + categories. It has to be distinct from the values used to encode any of + the categories in `fit`. If set to np.nan, the `dtype` parameter must + be a float dtype. + + .. versionadded:: 0.24 + + encoded_missing_value : int or np.nan, default=np.nan + Encoded value of missing categories. If set to `np.nan`, then the `dtype` + parameter must be a float dtype. + + .. versionadded:: 1.1 + + min_frequency : int or float, default=None + Specifies the minimum frequency below which a category will be + considered infrequent. + + - If `int`, categories with a smaller cardinality will be considered + infrequent. + + - If `float`, categories with a smaller cardinality than + `min_frequency * n_samples` will be considered infrequent. + + .. versionadded:: 1.3 + Read more in the :ref:`User Guide `. 
+ + max_categories : int, default=None + Specifies an upper limit to the number of output categories for each input + feature when considering infrequent categories. If there are infrequent + categories, `max_categories` includes the category representing the + infrequent categories along with the frequent categories. If `None`, + there is no limit to the number of output features. + + `max_categories` do **not** take into account missing or unknown + categories. Setting `unknown_value` or `encoded_missing_value` to an + integer will increase the number of unique integer codes by one each. + This can result in up to `max_categories + 2` integer codes. + + .. versionadded:: 1.3 + Read more in the :ref:`User Guide `. + + Attributes + ---------- + categories_ : list of arrays + The categories of each feature determined during ``fit`` (in order of + the features in X and corresponding with the output of ``transform``). + This does not include categories that weren't seen during ``fit``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 1.0 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + infrequent_categories_ : list of ndarray + Defined only if infrequent categories are enabled by setting + `min_frequency` or `max_categories` to a non-default value. + `infrequent_categories_[i]` are the infrequent categories for feature + `i`. If the feature `i` has no infrequent categories + `infrequent_categories_[i]` is None. + + .. versionadded:: 1.3 + + See Also + -------- + OneHotEncoder : Performs a one-hot encoding of categorical features. This encoding + is suitable for low to medium cardinality categorical variables, both in + supervised and unsupervised settings. + TargetEncoder : Encodes categorical features using supervised signal + in a classification or regression pipeline. This encoding is typically + suitable for high cardinality categorical variables. + LabelEncoder : Encodes target labels with values between 0 and + ``n_classes-1``. + + Notes + ----- + With a high proportion of `nan` values, inferring categories becomes slow with + Python versions before 3.10. The handling of `nan` values was improved + from Python 3.10 onwards, (c.f. + `bpo-43475 `_). + + Examples + -------- + Given a dataset with two features, we let the encoder find the unique + values per feature and transform the data to an ordinal encoding. + + >>> from sklearn.preprocessing import OrdinalEncoder + >>> enc = OrdinalEncoder() + >>> X = [['Male', 1], ['Female', 3], ['Female', 2]] + >>> enc.fit(X) + OrdinalEncoder() + >>> enc.categories_ + [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)] + >>> enc.transform([['Female', 3], ['Male', 1]]) + array([[0., 2.], + [1., 0.]]) + + >>> enc.inverse_transform([[1, 0], [0, 1]]) + array([['Male', 1], + ['Female', 2]], dtype=object) + + By default, :class:`OrdinalEncoder` is lenient towards missing values by + propagating them. + + >>> import numpy as np + >>> X = [['Male', 1], ['Female', 3], ['Female', np.nan]] + >>> enc.fit_transform(X) + array([[ 1., 0.], + [ 0., 1.], + [ 0., nan]]) + + You can use the parameter `encoded_missing_value` to encode missing values. + + >>> enc.set_params(encoded_missing_value=-1).fit_transform(X) + array([[ 1., 0.], + [ 0., 1.], + [ 0., -1.]]) + + Infrequent categories are enabled by setting `max_categories` or `min_frequency`. 
+ In the following example, "a" and "d" are considered infrequent and grouped + together into a single category, "b" and "c" are their own categories, unknown + values are encoded as 3 and missing values are encoded as 4. + + >>> X_train = np.array( + ... [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3 + [np.nan]], + ... dtype=object).T + >>> enc = OrdinalEncoder( + ... handle_unknown="use_encoded_value", unknown_value=3, + ... max_categories=3, encoded_missing_value=4) + >>> _ = enc.fit(X_train) + >>> X_test = np.array([["a"], ["b"], ["c"], ["d"], ["e"], [np.nan]], dtype=object) + >>> enc.transform(X_test) + array([[2.], + [0.], + [1.], + [2.], + [3.], + [4.]]) + """ + + _parameter_constraints: dict = { + "categories": [StrOptions({"auto"}), list], + "dtype": "no_validation", # validation delegated to numpy + "encoded_missing_value": [Integral, type(np.nan)], + "handle_unknown": [StrOptions({"error", "use_encoded_value"})], + "unknown_value": [Integral, type(np.nan), None], + "max_categories": [Interval(Integral, 1, None, closed="left"), None], + "min_frequency": [ + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="neither"), + None, + ], + } + + def __init__( + self, + *, + categories="auto", + dtype=np.float64, + handle_unknown="error", + unknown_value=None, + encoded_missing_value=np.nan, + min_frequency=None, + max_categories=None, + ): + self.categories = categories + self.dtype = dtype + self.handle_unknown = handle_unknown + self.unknown_value = unknown_value + self.encoded_missing_value = encoded_missing_value + self.min_frequency = min_frequency + self.max_categories = max_categories + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """ + Fit the OrdinalEncoder to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to determine the categories of each feature. + + y : None + Ignored. This parameter exists only for compatibility with + :class:`~sklearn.pipeline.Pipeline`. + + Returns + ------- + self : object + Fitted encoder. + """ + if self.handle_unknown == "use_encoded_value": + if is_scalar_nan(self.unknown_value): + if np.dtype(self.dtype).kind != "f": + raise ValueError( + "When unknown_value is np.nan, the dtype " + "parameter should be " + f"a float dtype. Got {self.dtype}." + ) + elif not isinstance(self.unknown_value, numbers.Integral): + raise TypeError( + "unknown_value should be an integer or " + "np.nan when " + "handle_unknown is 'use_encoded_value', " + f"got {self.unknown_value}." + ) + elif self.unknown_value is not None: + raise TypeError( + "unknown_value should only be set when " + "handle_unknown is 'use_encoded_value', " + f"got {self.unknown_value}." 
+ ) + + # `_fit` will only raise an error when `self.handle_unknown="error"` + fit_results = self._fit( + X, + handle_unknown=self.handle_unknown, + force_all_finite="allow-nan", + return_and_ignore_missing_for_infrequent=True, + ) + self._missing_indices = fit_results["missing_indices"] + + cardinalities = [len(categories) for categories in self.categories_] + if self._infrequent_enabled: + # Cardinality decreases because the infrequent categories are grouped + # together + for feature_idx, infrequent in enumerate(self.infrequent_categories_): + if infrequent is not None: + cardinalities[feature_idx] -= len(infrequent) + + # missing values are not considered part of the cardinality + # when considering unknown categories or encoded_missing_value + for cat_idx, categories_for_idx in enumerate(self.categories_): + if is_scalar_nan(categories_for_idx[-1]): + cardinalities[cat_idx] -= 1 + + if self.handle_unknown == "use_encoded_value": + for cardinality in cardinalities: + if 0 <= self.unknown_value < cardinality: + raise ValueError( + "The used value for unknown_value " + f"{self.unknown_value} is one of the " + "values already used for encoding the " + "seen categories." + ) + + if self._missing_indices: + if np.dtype(self.dtype).kind != "f" and is_scalar_nan( + self.encoded_missing_value + ): + raise ValueError( + "There are missing values in features " + f"{list(self._missing_indices)}. For OrdinalEncoder to " + f"encode missing values with dtype: {self.dtype}, set " + "encoded_missing_value to a non-nan value, or " + "set dtype to a float" + ) + + if not is_scalar_nan(self.encoded_missing_value): + # Features are invalid when they contain a missing category + # and encoded_missing_value was already used to encode a + # known category + invalid_features = [ + cat_idx + for cat_idx, cardinality in enumerate(cardinalities) + if cat_idx in self._missing_indices + and 0 <= self.encoded_missing_value < cardinality + ] + + if invalid_features: + # Use feature names if they are available + if hasattr(self, "feature_names_in_"): + invalid_features = self.feature_names_in_[invalid_features] + raise ValueError( + f"encoded_missing_value ({self.encoded_missing_value}) " + "is already used to encode a known category in features: " + f"{invalid_features}" + ) + + return self + + def transform(self, X): + """ + Transform X to ordinal codes. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to encode. + + Returns + ------- + X_out : ndarray of shape (n_samples, n_features) + Transformed input. + """ + check_is_fitted(self, "categories_") + X_int, X_mask = self._transform( + X, + handle_unknown=self.handle_unknown, + force_all_finite="allow-nan", + ignore_category_indices=self._missing_indices, + ) + X_trans = X_int.astype(self.dtype, copy=False) + + for cat_idx, missing_idx in self._missing_indices.items(): + X_missing_mask = X_int[:, cat_idx] == missing_idx + X_trans[X_missing_mask, cat_idx] = self.encoded_missing_value + + # create separate category for unknown values + if self.handle_unknown == "use_encoded_value": + X_trans[~X_mask] = self.unknown_value + return X_trans + + def inverse_transform(self, X): + """ + Convert the data back to the original representation. + + Parameters + ---------- + X : array-like of shape (n_samples, n_encoded_features) + The transformed data. + + Returns + ------- + X_tr : ndarray of shape (n_samples, n_features) + Inverse transformed array. 
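A short, hedged illustration of this round trip (assuming the encoder was fitted with `handle_unknown="use_encoded_value"`): codes equal to `unknown_value` are mapped back to `None`, and codes of grouped infrequent categories come back as the placeholder string `"infrequent_sklearn"`.

    from sklearn.preprocessing import OrdinalEncoder

    enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
    enc.fit([["a"], ["b"]])

    codes = enc.transform([["b"], ["c"]])   # "c" was never seen during fit
    print(codes)                            # expected: [[ 1.], [-1.]]
    print(enc.inverse_transform(codes))     # expected: [['b'], [None]]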
+ """ + check_is_fitted(self) + X = check_array(X, force_all_finite="allow-nan") + + n_samples, _ = X.shape + n_features = len(self.categories_) + + # validate shape of passed X + msg = ( + "Shape of the passed X data is not correct. Expected {0} columns, got {1}." + ) + if X.shape[1] != n_features: + raise ValueError(msg.format(n_features, X.shape[1])) + + # create resulting array of appropriate dtype + dt = np.result_type(*[cat.dtype for cat in self.categories_]) + X_tr = np.empty((n_samples, n_features), dtype=dt) + + found_unknown = {} + infrequent_masks = {} + + infrequent_indices = getattr(self, "_infrequent_indices", None) + + for i in range(n_features): + labels = X[:, i] + + # replace values of X[:, i] that were nan with actual indices + if i in self._missing_indices: + X_i_mask = _get_mask(labels, self.encoded_missing_value) + labels[X_i_mask] = self._missing_indices[i] + + rows_to_update = slice(None) + categories = self.categories_[i] + + if infrequent_indices is not None and infrequent_indices[i] is not None: + # Compute mask for frequent categories + infrequent_encoding_value = len(categories) - len(infrequent_indices[i]) + infrequent_masks[i] = labels == infrequent_encoding_value + rows_to_update = ~infrequent_masks[i] + + # Remap categories to be only frequent categories. The infrequent + # categories will be mapped to "infrequent_sklearn" later + frequent_categories_mask = np.ones_like(categories, dtype=bool) + frequent_categories_mask[infrequent_indices[i]] = False + categories = categories[frequent_categories_mask] + + if self.handle_unknown == "use_encoded_value": + unknown_labels = _get_mask(labels, self.unknown_value) + found_unknown[i] = unknown_labels + + known_labels = ~unknown_labels + if isinstance(rows_to_update, np.ndarray): + rows_to_update &= known_labels + else: + rows_to_update = known_labels + + labels_int = labels[rows_to_update].astype("int64", copy=False) + X_tr[rows_to_update, i] = categories[labels_int] + + if found_unknown or infrequent_masks: + X_tr = X_tr.astype(object, copy=False) + + # insert None values for unknown values + if found_unknown: + for idx, mask in found_unknown.items(): + X_tr[mask, idx] = None + + if infrequent_masks: + for idx, mask in infrequent_masks.items(): + X_tr[mask, idx] = "infrequent_sklearn" + + return X_tr diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/_function_transformer.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_function_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..921bd6a01fb713e9452fd7a934f48b22295ed1b1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_function_transformer.py @@ -0,0 +1,431 @@ +import warnings + +import numpy as np + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils._param_validation import StrOptions +from ..utils._set_output import ADAPTERS_MANAGER, _get_output_config +from ..utils.metaestimators import available_if +from ..utils.validation import ( + _allclose_dense_sparse, + _check_feature_names_in, + _get_feature_names, + _is_pandas_df, + _is_polars_df, + check_array, +) + + +def _get_adapter_from_container(container): + """Get the adapter that nows how to handle such container. + + See :class:`sklearn.utils._set_output.ContainerAdapterProtocol` for more + details. 
+ """ + module_name = container.__class__.__module__.split(".")[0] + try: + return ADAPTERS_MANAGER.adapters[module_name] + except KeyError as exc: + available_adapters = list(ADAPTERS_MANAGER.adapters.keys()) + raise ValueError( + "The container does not have a registered adapter in scikit-learn. " + f"Available adapters are: {available_adapters} while the container " + f"provided is: {container!r}." + ) from exc + + +def _identity(X): + """The identity function.""" + return X + + +class FunctionTransformer(TransformerMixin, BaseEstimator): + """Constructs a transformer from an arbitrary callable. + + A FunctionTransformer forwards its X (and optionally y) arguments to a + user-defined function or function object and returns the result of this + function. This is useful for stateless transformations such as taking the + log of frequencies, doing custom scaling, etc. + + Note: If a lambda is used as the function, then the resulting + transformer will not be pickleable. + + .. versionadded:: 0.17 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + func : callable, default=None + The callable to use for the transformation. This will be passed + the same arguments as transform, with args and kwargs forwarded. + If func is None, then func will be the identity function. + + inverse_func : callable, default=None + The callable to use for the inverse transformation. This will be + passed the same arguments as inverse transform, with args and + kwargs forwarded. If inverse_func is None, then inverse_func + will be the identity function. + + validate : bool, default=False + Indicate that the input X array should be checked before calling + ``func``. The possibilities are: + + - If False, there is no input validation. + - If True, then X will be converted to a 2-dimensional NumPy array or + sparse matrix. If the conversion is not possible an exception is + raised. + + .. versionchanged:: 0.22 + The default of ``validate`` changed from True to False. + + accept_sparse : bool, default=False + Indicate that func accepts a sparse matrix as input. If validate is + False, this has no effect. Otherwise, if accept_sparse is false, + sparse matrix inputs will cause an exception to be raised. + + check_inverse : bool, default=True + Whether to check that or ``func`` followed by ``inverse_func`` leads to + the original inputs. It can be used for a sanity check, raising a + warning when the condition is not fulfilled. + + .. versionadded:: 0.20 + + feature_names_out : callable, 'one-to-one' or None, default=None + Determines the list of feature names that will be returned by the + `get_feature_names_out` method. If it is 'one-to-one', then the output + feature names will be equal to the input feature names. If it is a + callable, then it must take two positional arguments: this + `FunctionTransformer` (`self`) and an array-like of input feature names + (`input_features`). It must return an array-like of output feature + names. The `get_feature_names_out` method is only defined if + `feature_names_out` is not None. + + See ``get_feature_names_out`` for more details. + + .. versionadded:: 1.1 + + kw_args : dict, default=None + Dictionary of additional keyword arguments to pass to func. + + .. versionadded:: 0.18 + + inv_kw_args : dict, default=None + Dictionary of additional keyword arguments to pass to inverse_func. + + .. versionadded:: 0.18 + + Attributes + ---------- + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` has feature + names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + MaxAbsScaler : Scale each feature by its maximum absolute value. + StandardScaler : Standardize features by removing the mean and + scaling to unit variance. + LabelBinarizer : Binarize labels in a one-vs-all fashion. + MultiLabelBinarizer : Transform between iterable of iterables + and a multilabel format. + + Notes + ----- + If `func` returns an output with a `columns` attribute, then the columns is enforced + to be consistent with the output of `get_feature_names_out`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.preprocessing import FunctionTransformer + >>> transformer = FunctionTransformer(np.log1p) + >>> X = np.array([[0, 1], [2, 3]]) + >>> transformer.transform(X) + array([[0. , 0.6931...], + [1.0986..., 1.3862...]]) + """ + + _parameter_constraints: dict = { + "func": [callable, None], + "inverse_func": [callable, None], + "validate": ["boolean"], + "accept_sparse": ["boolean"], + "check_inverse": ["boolean"], + "feature_names_out": [callable, StrOptions({"one-to-one"}), None], + "kw_args": [dict, None], + "inv_kw_args": [dict, None], + } + + def __init__( + self, + func=None, + inverse_func=None, + *, + validate=False, + accept_sparse=False, + check_inverse=True, + feature_names_out=None, + kw_args=None, + inv_kw_args=None, + ): + self.func = func + self.inverse_func = inverse_func + self.validate = validate + self.accept_sparse = accept_sparse + self.check_inverse = check_inverse + self.feature_names_out = feature_names_out + self.kw_args = kw_args + self.inv_kw_args = inv_kw_args + + def _check_input(self, X, *, reset): + if self.validate: + return self._validate_data(X, accept_sparse=self.accept_sparse, reset=reset) + elif reset: + # Set feature_names_in_ and n_features_in_ even if validate=False + # We run this only when reset==True to store the attributes but not + # validate them, because validate=False + self._check_n_features(X, reset=reset) + self._check_feature_names(X, reset=reset) + return X + + def _check_inverse_transform(self, X): + """Check that func and inverse_func are the inverse.""" + idx_selected = slice(None, None, max(1, X.shape[0] // 100)) + X_round_trip = self.inverse_transform(self.transform(X[idx_selected])) + + if hasattr(X, "dtype"): + dtypes = [X.dtype] + elif hasattr(X, "dtypes"): + # Dataframes can have multiple dtypes + dtypes = X.dtypes + + if not all(np.issubdtype(d, np.number) for d in dtypes): + raise ValueError( + "'check_inverse' is only supported when all the elements in `X` is" + " numerical." + ) + + if not _allclose_dense_sparse(X[idx_selected], X_round_trip): + warnings.warn( + ( + "The provided functions are not strictly" + " inverse of each other. If you are sure you" + " want to proceed regardless, set" + " 'check_inverse=False'." + ), + UserWarning, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit transformer by checking X. + + If ``validate`` is ``True``, ``X`` will be checked. + + Parameters + ---------- + X : {array-like, sparse-matrix} of shape (n_samples, n_features) \ + if `validate=True` else any object that `func` can handle + Input array. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + FunctionTransformer class instance. 
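A hedged sketch of what `check_inverse` does at fit time (the function choices here are only for illustration): the round trip is evaluated on a subsample, and a `UserWarning` is emitted when `inverse_func(func(X))` does not reproduce `X`.

    import warnings
    import numpy as np
    from sklearn.preprocessing import FunctionTransformer

    X = np.array([[1.0, 2.0], [3.0, 4.0]])

    # np.expm1 is the exact inverse of np.log1p, so no warning is expected here.
    FunctionTransformer(np.log1p, np.expm1, check_inverse=True).fit(X)

    # np.exp is *not* the inverse of np.log1p (exp(log1p(x)) == 1 + x), so this
    # fit should warn that the provided functions are not strictly inverse.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        FunctionTransformer(np.log1p, np.exp, check_inverse=True).fit(X)
    print([str(w.message) for w in caught])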
+ """ + X = self._check_input(X, reset=True) + if self.check_inverse and not (self.func is None or self.inverse_func is None): + self._check_inverse_transform(X) + return self + + def transform(self, X): + """Transform X using the forward function. + + Parameters + ---------- + X : {array-like, sparse-matrix} of shape (n_samples, n_features) \ + if `validate=True` else any object that `func` can handle + Input array. + + Returns + ------- + X_out : array-like, shape (n_samples, n_features) + Transformed input. + """ + X = self._check_input(X, reset=False) + out = self._transform(X, func=self.func, kw_args=self.kw_args) + output_config = _get_output_config("transform", self)["dense"] + + if hasattr(out, "columns") and self.feature_names_out is not None: + # check the consistency between the column provided by `transform` and + # the the column names provided by `get_feature_names_out`. + feature_names_out = self.get_feature_names_out() + if list(out.columns) != list(feature_names_out): + # we can override the column names of the output if it is inconsistent + # with the column names provided by `get_feature_names_out` in the + # following cases: + # * `func` preserved the column names between the input and the output + # * the input column names are all numbers + # * the output is requested to be a DataFrame (pandas or polars) + feature_names_in = getattr( + X, "feature_names_in_", _get_feature_names(X) + ) + same_feature_names_in_out = feature_names_in is not None and list( + feature_names_in + ) == list(out.columns) + not_all_str_columns = not all( + isinstance(col, str) for col in out.columns + ) + if same_feature_names_in_out or not_all_str_columns: + adapter = _get_adapter_from_container(out) + out = adapter.create_container( + X_output=out, + X_original=out, + columns=feature_names_out, + inplace=False, + ) + else: + raise ValueError( + "The output generated by `func` have different column names " + "than the ones provided by `get_feature_names_out`. " + f"Got output with columns names: {list(out.columns)} and " + "`get_feature_names_out` returned: " + f"{list(self.get_feature_names_out())}. " + "The column names can be overridden by setting " + "`set_output(transform='pandas')` or " + "`set_output(transform='polars')` such that the column names " + "are set to the names provided by `get_feature_names_out`." + ) + + if self.feature_names_out is None: + warn_msg = ( + "When `set_output` is configured to be '{0}', `func` should return " + "a {0} DataFrame to follow the `set_output` API or `feature_names_out`" + " should be defined." + ) + if output_config == "pandas" and not _is_pandas_df(out): + warnings.warn(warn_msg.format("pandas")) + elif output_config == "polars" and not _is_polars_df(out): + warnings.warn(warn_msg.format("polars")) + + return out + + def inverse_transform(self, X): + """Transform X using the inverse function. + + Parameters + ---------- + X : {array-like, sparse-matrix} of shape (n_samples, n_features) \ + if `validate=True` else any object that `inverse_func` can handle + Input array. + + Returns + ------- + X_out : array-like, shape (n_samples, n_features) + Transformed input. + """ + if self.validate: + X = check_array(X, accept_sparse=self.accept_sparse) + return self._transform(X, func=self.inverse_func, kw_args=self.inv_kw_args) + + @available_if(lambda self: self.feature_names_out is not None) + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. 
+ + This method is only defined if `feature_names_out` is not None. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input feature names. + + - If `input_features` is None, then `feature_names_in_` is + used as the input feature names. If `feature_names_in_` is not + defined, then names are generated: + `[x0, x1, ..., x(n_features_in_ - 1)]`. + - If `input_features` is array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + + - If `feature_names_out` is 'one-to-one', the input feature names + are returned (see `input_features` above). This requires + `feature_names_in_` and/or `n_features_in_` to be defined, which + is done automatically if `validate=True`. Alternatively, you can + set them in `func`. + - If `feature_names_out` is a callable, then it is called with two + arguments, `self` and `input_features`, and its return value is + returned by this method. + """ + if hasattr(self, "n_features_in_") or input_features is not None: + input_features = _check_feature_names_in(self, input_features) + if self.feature_names_out == "one-to-one": + names_out = input_features + elif callable(self.feature_names_out): + names_out = self.feature_names_out(self, input_features) + else: + raise ValueError( + f"feature_names_out={self.feature_names_out!r} is invalid. " + 'It must either be "one-to-one" or a callable with two ' + "arguments: the function transformer and an array-like of " + "input feature names. The callable must return an array-like " + "of output feature names." + ) + return np.asarray(names_out, dtype=object) + + def _transform(self, X, func=None, kw_args=None): + if func is None: + func = _identity + + return func(X, **(kw_args if kw_args else {})) + + def __sklearn_is_fitted__(self): + """Return True since FunctionTransfomer is stateless.""" + return True + + def _more_tags(self): + return {"no_validation": not self.validate, "stateless": True} + + def set_output(self, *, transform=None): + """Set output container. + + See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py` + for an example on how to use the API. + + Parameters + ---------- + transform : {"default", "pandas"}, default=None + Configure output of `transform` and `fit_transform`. + + - `"default"`: Default output format of a transformer + - `"pandas"`: DataFrame output + - `"polars"`: Polars output + - `None`: Transform configuration is unchanged + + .. versionadded:: 1.4 + `"polars"` option was added. + + Returns + ------- + self : estimator instance + Estimator instance. 
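A hedged example of how `feature_names_out` feeds `get_feature_names_out` and, in turn, the column names used when a DataFrame output is requested (the `log1p_` prefix is an arbitrary choice for illustration):

    import numpy as np
    from sklearn.preprocessing import FunctionTransformer

    # A callable feature_names_out receives the transformer and the input
    # feature names, and must return the output names.
    t = FunctionTransformer(
        np.log1p,
        feature_names_out=lambda transformer, names: [f"log1p_{n}" for n in names],
        validate=True,
    )
    t.fit(np.array([[1.0, 2.0], [3.0, 4.0]]))
    print(t.get_feature_names_out())   # expected: ['log1p_x0' 'log1p_x1']

    # With pandas installed, t.set_output(transform="pandas") would make
    # transform() return a DataFrame whose columns follow these names.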
+ """ + if not hasattr(self, "_sklearn_output_config"): + self._sklearn_output_config = {} + + self._sklearn_output_config["transform"] = transform + return self diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/_label.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_label.py new file mode 100644 index 0000000000000000000000000000000000000000..bd009d52a685398c6f94fe1019c8437e95b98313 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_label.py @@ -0,0 +1,951 @@ +# Authors: Alexandre Gramfort +# Mathieu Blondel +# Olivier Grisel +# Andreas Mueller +# Joel Nothman +# Hamzeh Alsalhi +# License: BSD 3 clause + +import array +import itertools +import warnings +from collections import defaultdict +from numbers import Integral + +import numpy as np +import scipy.sparse as sp + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils import column_or_1d +from ..utils._encode import _encode, _unique +from ..utils._param_validation import Interval, validate_params +from ..utils.multiclass import type_of_target, unique_labels +from ..utils.sparsefuncs import min_max_axis +from ..utils.validation import _num_samples, check_array, check_is_fitted + +__all__ = [ + "label_binarize", + "LabelBinarizer", + "LabelEncoder", + "MultiLabelBinarizer", +] + + +class LabelEncoder(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None): + """Encode target labels with value between 0 and n_classes-1. + + This transformer should be used to encode target values, *i.e.* `y`, and + not the input `X`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.12 + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + Holds the label for each class. + + See Also + -------- + OrdinalEncoder : Encode categorical features using an ordinal encoding + scheme. + OneHotEncoder : Encode categorical features as a one-hot numeric array. + + Examples + -------- + `LabelEncoder` can be used to normalize labels. + + >>> from sklearn.preprocessing import LabelEncoder + >>> le = LabelEncoder() + >>> le.fit([1, 2, 2, 6]) + LabelEncoder() + >>> le.classes_ + array([1, 2, 6]) + >>> le.transform([1, 1, 2, 6]) + array([0, 0, 1, 2]...) + >>> le.inverse_transform([0, 0, 1, 2]) + array([1, 1, 2, 6]) + + It can also be used to transform non-numerical labels (as long as they are + hashable and comparable) to numerical labels. + + >>> le = LabelEncoder() + >>> le.fit(["paris", "paris", "tokyo", "amsterdam"]) + LabelEncoder() + >>> list(le.classes_) + ['amsterdam', 'paris', 'tokyo'] + >>> le.transform(["tokyo", "tokyo", "paris"]) + array([2, 2, 1]...) + >>> list(le.inverse_transform([2, 2, 1])) + ['tokyo', 'tokyo', 'paris'] + """ + + def fit(self, y): + """Fit label encoder. + + Parameters + ---------- + y : array-like of shape (n_samples,) + Target values. + + Returns + ------- + self : returns an instance of self. + Fitted label encoder. + """ + y = column_or_1d(y, warn=True) + self.classes_ = _unique(y) + return self + + def fit_transform(self, y): + """Fit label encoder and return encoded labels. + + Parameters + ---------- + y : array-like of shape (n_samples,) + Target values. + + Returns + ------- + y : array-like of shape (n_samples,) + Encoded labels. + """ + y = column_or_1d(y, warn=True) + self.classes_, y = _unique(y, return_inverse=True) + return y + + def transform(self, y): + """Transform labels to normalized encoding. + + Parameters + ---------- + y : array-like of shape (n_samples,) + Target values. 
+ + Returns + ------- + y : array-like of shape (n_samples,) + Labels as normalized encodings. + """ + check_is_fitted(self) + y = column_or_1d(y, dtype=self.classes_.dtype, warn=True) + # transform of empty array is empty array + if _num_samples(y) == 0: + return np.array([]) + + return _encode(y, uniques=self.classes_) + + def inverse_transform(self, y): + """Transform labels back to original encoding. + + Parameters + ---------- + y : ndarray of shape (n_samples,) + Target values. + + Returns + ------- + y : ndarray of shape (n_samples,) + Original encoding. + """ + check_is_fitted(self) + y = column_or_1d(y, warn=True) + # inverse transform of empty array is empty array + if _num_samples(y) == 0: + return np.array([]) + + diff = np.setdiff1d(y, np.arange(len(self.classes_))) + if len(diff): + raise ValueError("y contains previously unseen labels: %s" % str(diff)) + y = np.asarray(y) + return self.classes_[y] + + def _more_tags(self): + return {"X_types": ["1dlabels"]} + + +class LabelBinarizer(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None): + """Binarize labels in a one-vs-all fashion. + + Several regression and binary classification algorithms are + available in scikit-learn. A simple way to extend these algorithms + to the multi-class classification case is to use the so-called + one-vs-all scheme. + + At learning time, this simply consists in learning one regressor + or binary classifier per class. In doing so, one needs to convert + multi-class labels to binary labels (belong or does not belong + to the class). `LabelBinarizer` makes this process easy with the + transform method. + + At prediction time, one assigns the class for which the corresponding + model gave the greatest confidence. `LabelBinarizer` makes this easy + with the :meth:`inverse_transform` method. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + neg_label : int, default=0 + Value with which negative labels must be encoded. + + pos_label : int, default=1 + Value with which positive labels must be encoded. + + sparse_output : bool, default=False + True if the returned array from transform is desired to be in sparse + CSR format. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + Holds the label for each class. + + y_type_ : str + Represents the type of the target data as evaluated by + :func:`~sklearn.utils.multiclass.type_of_target`. Possible type are + 'continuous', 'continuous-multioutput', 'binary', 'multiclass', + 'multiclass-multioutput', 'multilabel-indicator', and 'unknown'. + + sparse_input_ : bool + `True` if the input data to transform is given as a sparse matrix, + `False` otherwise. + + See Also + -------- + label_binarize : Function to perform the transform operation of + LabelBinarizer with fixed classes. + OneHotEncoder : Encode categorical features using a one-hot aka one-of-K + scheme. 
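A small, hedged illustration of the attributes listed above: after fitting on an integer multiclass target, `y_type_` records how the target was interpreted and `sparse_input_` whether a sparse matrix was passed.

    from sklearn.preprocessing import LabelBinarizer

    lb = LabelBinarizer().fit([1, 2, 6, 4, 2])
    print(lb.classes_)                    # expected: [1 2 4 6]
    print(lb.y_type_, lb.sparse_input_)   # expected: multiclass False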
+ + Examples + -------- + >>> from sklearn.preprocessing import LabelBinarizer + >>> lb = LabelBinarizer() + >>> lb.fit([1, 2, 6, 4, 2]) + LabelBinarizer() + >>> lb.classes_ + array([1, 2, 4, 6]) + >>> lb.transform([1, 6]) + array([[1, 0, 0, 0], + [0, 0, 0, 1]]) + + Binary targets transform to a column vector + + >>> lb = LabelBinarizer() + >>> lb.fit_transform(['yes', 'no', 'no', 'yes']) + array([[1], + [0], + [0], + [1]]) + + Passing a 2D matrix for multilabel classification + + >>> import numpy as np + >>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]])) + LabelBinarizer() + >>> lb.classes_ + array([0, 1, 2]) + >>> lb.transform([0, 1, 2, 1]) + array([[1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, 1, 0]]) + """ + + _parameter_constraints: dict = { + "neg_label": [Integral], + "pos_label": [Integral], + "sparse_output": ["boolean"], + } + + def __init__(self, *, neg_label=0, pos_label=1, sparse_output=False): + self.neg_label = neg_label + self.pos_label = pos_label + self.sparse_output = sparse_output + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, y): + """Fit label binarizer. + + Parameters + ---------- + y : ndarray of shape (n_samples,) or (n_samples, n_classes) + Target values. The 2-d matrix should only contain 0 and 1, + represents multilabel classification. + + Returns + ------- + self : object + Returns the instance itself. + """ + if self.neg_label >= self.pos_label: + raise ValueError( + f"neg_label={self.neg_label} must be strictly less than " + f"pos_label={self.pos_label}." + ) + + if self.sparse_output and (self.pos_label == 0 or self.neg_label != 0): + raise ValueError( + "Sparse binarization is only supported with non " + "zero pos_label and zero neg_label, got " + f"pos_label={self.pos_label} and neg_label={self.neg_label}" + ) + + self.y_type_ = type_of_target(y, input_name="y") + + if "multioutput" in self.y_type_: + raise ValueError( + "Multioutput target data is not supported with label binarization" + ) + if _num_samples(y) == 0: + raise ValueError("y has 0 samples: %r" % y) + + self.sparse_input_ = sp.issparse(y) + self.classes_ = unique_labels(y) + return self + + def fit_transform(self, y): + """Fit label binarizer/transform multi-class labels to binary labels. + + The output of transform is sometimes referred to as + the 1-of-K coding scheme. + + Parameters + ---------- + y : {ndarray, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_classes) + Target values. The 2-d matrix should only contain 0 and 1, + represents multilabel classification. Sparse matrix can be + CSR, CSC, COO, DOK, or LIL. + + Returns + ------- + Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) + Shape will be (n_samples, 1) for binary problems. Sparse matrix + will be of CSR format. + """ + return self.fit(y).transform(y) + + def transform(self, y): + """Transform multi-class labels to binary labels. + + The output of transform is sometimes referred to by some authors as + the 1-of-K coding scheme. + + Parameters + ---------- + y : {array, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_classes) + Target values. The 2-d matrix should only contain 0 and 1, + represents multilabel classification. Sparse matrix can be + CSR, CSC, COO, DOK, or LIL. + + Returns + ------- + Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) + Shape will be (n_samples, 1) for binary problems. Sparse matrix + will be of CSR format. 
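A hedged sketch of the effect of `neg_label` and `pos_label` on the transformed output (useful, for instance, when a downstream estimator expects -1/+1 targets):

    from sklearn.preprocessing import LabelBinarizer

    lb = LabelBinarizer(neg_label=-1, pos_label=1).fit(["no", "yes"])
    print(lb.transform(["no", "yes", "yes"]))
    # expected column vector: [[-1], [ 1], [ 1]]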
+ """ + check_is_fitted(self) + + y_is_multilabel = type_of_target(y).startswith("multilabel") + if y_is_multilabel and not self.y_type_.startswith("multilabel"): + raise ValueError("The object was not fitted with multilabel input.") + + return label_binarize( + y, + classes=self.classes_, + pos_label=self.pos_label, + neg_label=self.neg_label, + sparse_output=self.sparse_output, + ) + + def inverse_transform(self, Y, threshold=None): + """Transform binary labels back to multi-class labels. + + Parameters + ---------- + Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) + Target values. All sparse matrices are converted to CSR before + inverse transformation. + + threshold : float, default=None + Threshold used in the binary and multi-label cases. + + Use 0 when ``Y`` contains the output of :term:`decision_function` + (classifier). + Use 0.5 when ``Y`` contains the output of :term:`predict_proba`. + + If None, the threshold is assumed to be half way between + neg_label and pos_label. + + Returns + ------- + y : {ndarray, sparse matrix} of shape (n_samples,) + Target values. Sparse matrix will be of CSR format. + + Notes + ----- + In the case when the binary labels are fractional + (probabilistic), :meth:`inverse_transform` chooses the class with the + greatest value. Typically, this allows to use the output of a + linear model's :term:`decision_function` method directly as the input + of :meth:`inverse_transform`. + """ + check_is_fitted(self) + + if threshold is None: + threshold = (self.pos_label + self.neg_label) / 2.0 + + if self.y_type_ == "multiclass": + y_inv = _inverse_binarize_multiclass(Y, self.classes_) + else: + y_inv = _inverse_binarize_thresholding( + Y, self.y_type_, self.classes_, threshold + ) + + if self.sparse_input_: + y_inv = sp.csr_matrix(y_inv) + elif sp.issparse(y_inv): + y_inv = y_inv.toarray() + + return y_inv + + def _more_tags(self): + return {"X_types": ["1dlabels"]} + + +@validate_params( + { + "y": ["array-like"], + "classes": ["array-like"], + "neg_label": [Interval(Integral, None, None, closed="neither")], + "pos_label": [Interval(Integral, None, None, closed="neither")], + "sparse_output": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def label_binarize(y, *, classes, neg_label=0, pos_label=1, sparse_output=False): + """Binarize labels in a one-vs-all fashion. + + Several regression and binary classification algorithms are + available in scikit-learn. A simple way to extend these algorithms + to the multi-class classification case is to use the so-called + one-vs-all scheme. + + This function makes it possible to compute this transformation for a + fixed set of class labels known ahead of time. + + Parameters + ---------- + y : array-like + Sequence of integer labels or multilabel data to encode. + + classes : array-like of shape (n_classes,) + Uniquely holds the label for each class. + + neg_label : int, default=0 + Value with which negative labels must be encoded. + + pos_label : int, default=1 + Value with which positive labels must be encoded. + + sparse_output : bool, default=False, + Set to true if output binary array is desired in CSR sparse format. + + Returns + ------- + Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) + Shape will be (n_samples, 1) for binary problems. Sparse matrix will + be of CSR format. + + See Also + -------- + LabelBinarizer : Class used to wrap the functionality of label_binarize and + allow for fitting to classes independently of the transform operation. 
+ + Examples + -------- + >>> from sklearn.preprocessing import label_binarize + >>> label_binarize([1, 6], classes=[1, 2, 4, 6]) + array([[1, 0, 0, 0], + [0, 0, 0, 1]]) + + The class ordering is preserved: + + >>> label_binarize([1, 6], classes=[1, 6, 4, 2]) + array([[1, 0, 0, 0], + [0, 1, 0, 0]]) + + Binary targets transform to a column vector + + >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes']) + array([[1], + [0], + [0], + [1]]) + """ + if not isinstance(y, list): + # XXX Workaround that will be removed when list of list format is + # dropped + y = check_array( + y, input_name="y", accept_sparse="csr", ensure_2d=False, dtype=None + ) + else: + if _num_samples(y) == 0: + raise ValueError("y has 0 samples: %r" % y) + if neg_label >= pos_label: + raise ValueError( + "neg_label={0} must be strictly less than pos_label={1}.".format( + neg_label, pos_label + ) + ) + + if sparse_output and (pos_label == 0 or neg_label != 0): + raise ValueError( + "Sparse binarization is only supported with non " + "zero pos_label and zero neg_label, got " + "pos_label={0} and neg_label={1}" + "".format(pos_label, neg_label) + ) + + # To account for pos_label == 0 in the dense case + pos_switch = pos_label == 0 + if pos_switch: + pos_label = -neg_label + + y_type = type_of_target(y) + if "multioutput" in y_type: + raise ValueError( + "Multioutput target data is not supported with label binarization" + ) + if y_type == "unknown": + raise ValueError("The type of target data is not known") + + n_samples = y.shape[0] if sp.issparse(y) else len(y) + n_classes = len(classes) + classes = np.asarray(classes) + + if y_type == "binary": + if n_classes == 1: + if sparse_output: + return sp.csr_matrix((n_samples, 1), dtype=int) + else: + Y = np.zeros((len(y), 1), dtype=int) + Y += neg_label + return Y + elif len(classes) >= 3: + y_type = "multiclass" + + sorted_class = np.sort(classes) + if y_type == "multilabel-indicator": + y_n_classes = y.shape[1] if hasattr(y, "shape") else len(y[0]) + if classes.size != y_n_classes: + raise ValueError( + "classes {0} mismatch with the labels {1} found in the data".format( + classes, unique_labels(y) + ) + ) + + if y_type in ("binary", "multiclass"): + y = column_or_1d(y) + + # pick out the known labels from y + y_in_classes = np.isin(y, classes) + y_seen = y[y_in_classes] + indices = np.searchsorted(sorted_class, y_seen) + indptr = np.hstack((0, np.cumsum(y_in_classes))) + + data = np.empty_like(indices) + data.fill(pos_label) + Y = sp.csr_matrix((data, indices, indptr), shape=(n_samples, n_classes)) + elif y_type == "multilabel-indicator": + Y = sp.csr_matrix(y) + if pos_label != 1: + data = np.empty_like(Y.data) + data.fill(pos_label) + Y.data = data + else: + raise ValueError( + "%s target data is not supported with label binarization" % y_type + ) + + if not sparse_output: + Y = Y.toarray() + Y = Y.astype(int, copy=False) + + if neg_label != 0: + Y[Y == 0] = neg_label + + if pos_switch: + Y[Y == pos_label] = 0 + else: + Y.data = Y.data.astype(int, copy=False) + + # preserve label ordering + if np.any(classes != sorted_class): + indices = np.searchsorted(sorted_class, classes) + Y = Y[:, indices] + + if y_type == "binary": + if sparse_output: + Y = Y.getcol(-1) + else: + Y = Y[:, -1].reshape((-1, 1)) + + return Y + + +def _inverse_binarize_multiclass(y, classes): + """Inverse label binarization transformation for multiclass. + + Multiclass uses the maximal score instead of a threshold. 
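Illustrated through the public API (a hedged sketch with arbitrary scores): for a multiclass target, the inverse picks, per row, the class of the column with the largest value.

    import numpy as np
    from sklearn.preprocessing import LabelBinarizer

    lb = LabelBinarizer().fit([1, 2, 6])
    scores = np.array([[0.1, 0.8, 0.1],
                       [0.7, 0.2, 0.1]])
    print(lb.inverse_transform(scores))   # expected: [2 1]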
+ """ + classes = np.asarray(classes) + + if sp.issparse(y): + # Find the argmax for each row in y where y is a CSR matrix + + y = y.tocsr() + n_samples, n_outputs = y.shape + outputs = np.arange(n_outputs) + row_max = min_max_axis(y, 1)[1] + row_nnz = np.diff(y.indptr) + + y_data_repeated_max = np.repeat(row_max, row_nnz) + # picks out all indices obtaining the maximum per row + y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data) + + # For corner case where last row has a max of 0 + if row_max[-1] == 0: + y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)]) + + # Gets the index of the first argmax in each row from y_i_all_argmax + index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1]) + # first argmax of each row + y_ind_ext = np.append(y.indices, [0]) + y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]] + # Handle rows of all 0 + y_i_argmax[np.where(row_nnz == 0)[0]] = 0 + + # Handles rows with max of 0 that contain negative numbers + samples = np.arange(n_samples)[(row_nnz > 0) & (row_max.ravel() == 0)] + for i in samples: + ind = y.indices[y.indptr[i] : y.indptr[i + 1]] + y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0] + + return classes[y_i_argmax] + else: + return classes.take(y.argmax(axis=1), mode="clip") + + +def _inverse_binarize_thresholding(y, output_type, classes, threshold): + """Inverse label binarization transformation using thresholding.""" + + if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2: + raise ValueError("output_type='binary', but y.shape = {0}".format(y.shape)) + + if output_type != "binary" and y.shape[1] != len(classes): + raise ValueError( + "The number of class is not equal to the number of dimension of y." + ) + + classes = np.asarray(classes) + + # Perform thresholding + if sp.issparse(y): + if threshold > 0: + if y.format not in ("csr", "csc"): + y = y.tocsr() + y.data = np.array(y.data > threshold, dtype=int) + y.eliminate_zeros() + else: + y = np.array(y.toarray() > threshold, dtype=int) + else: + y = np.array(y > threshold, dtype=int) + + # Inverse transform data + if output_type == "binary": + if sp.issparse(y): + y = y.toarray() + if y.ndim == 2 and y.shape[1] == 2: + return classes[y[:, 1]] + else: + if len(classes) == 1: + return np.repeat(classes[0], len(y)) + else: + return classes[y.ravel()] + + elif output_type == "multilabel-indicator": + return y + + else: + raise ValueError("{0} format is not supported".format(output_type)) + + +class MultiLabelBinarizer(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None): + """Transform between iterable of iterables and a multilabel format. + + Although a list of sets or tuples is a very intuitive format for multilabel + data, it is unwieldy to process. This transformer converts between this + intuitive format and the supported multilabel format: a (samples x classes) + binary matrix indicating the presence of a class label. + + Parameters + ---------- + classes : array-like of shape (n_classes,), default=None + Indicates an ordering for the class labels. + All entries should be unique (cannot contain duplicate classes). + + sparse_output : bool, default=False + Set to True if output binary array is desired in CSR sparse format. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + A copy of the `classes` parameter when provided. + Otherwise it corresponds to the sorted set of classes found + when fitting. + + See Also + -------- + OneHotEncoder : Encode categorical features using a one-hot aka one-of-K + scheme. 
+ + Examples + -------- + >>> from sklearn.preprocessing import MultiLabelBinarizer + >>> mlb = MultiLabelBinarizer() + >>> mlb.fit_transform([(1, 2), (3,)]) + array([[1, 1, 0], + [0, 0, 1]]) + >>> mlb.classes_ + array([1, 2, 3]) + + >>> mlb.fit_transform([{'sci-fi', 'thriller'}, {'comedy'}]) + array([[0, 1, 1], + [1, 0, 0]]) + >>> list(mlb.classes_) + ['comedy', 'sci-fi', 'thriller'] + + A common mistake is to pass in a list, which leads to the following issue: + + >>> mlb = MultiLabelBinarizer() + >>> mlb.fit(['sci-fi', 'thriller', 'comedy']) + MultiLabelBinarizer() + >>> mlb.classes_ + array(['-', 'c', 'd', 'e', 'f', 'h', 'i', 'l', 'm', 'o', 'r', 's', 't', + 'y'], dtype=object) + + To correct this, the list of labels should be passed in as: + + >>> mlb = MultiLabelBinarizer() + >>> mlb.fit([['sci-fi', 'thriller', 'comedy']]) + MultiLabelBinarizer() + >>> mlb.classes_ + array(['comedy', 'sci-fi', 'thriller'], dtype=object) + """ + + _parameter_constraints: dict = { + "classes": ["array-like", None], + "sparse_output": ["boolean"], + } + + def __init__(self, *, classes=None, sparse_output=False): + self.classes = classes + self.sparse_output = sparse_output + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, y): + """Fit the label sets binarizer, storing :term:`classes_`. + + Parameters + ---------- + y : iterable of iterables + A set of labels (any orderable and hashable object) for each + sample. If the `classes` parameter is set, `y` will not be + iterated. + + Returns + ------- + self : object + Fitted estimator. + """ + self._cached_dict = None + + if self.classes is None: + classes = sorted(set(itertools.chain.from_iterable(y))) + elif len(set(self.classes)) < len(self.classes): + raise ValueError( + "The classes argument contains duplicate " + "classes. Remove these duplicates before passing " + "them to MultiLabelBinarizer." + ) + else: + classes = self.classes + dtype = int if all(isinstance(c, int) for c in classes) else object + self.classes_ = np.empty(len(classes), dtype=dtype) + self.classes_[:] = classes + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, y): + """Fit the label sets binarizer and transform the given label sets. + + Parameters + ---------- + y : iterable of iterables + A set of labels (any orderable and hashable object) for each + sample. If the `classes` parameter is set, `y` will not be + iterated. + + Returns + ------- + y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes) + A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` + is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR + format. + """ + if self.classes is not None: + return self.fit(y).transform(y) + + self._cached_dict = None + + # Automatically increment on new class + class_mapping = defaultdict(int) + class_mapping.default_factory = class_mapping.__len__ + yt = self._transform(y, class_mapping) + + # sort classes and reorder columns + tmp = sorted(class_mapping, key=class_mapping.get) + + # (make safe for tuples) + dtype = int if all(isinstance(c, int) for c in tmp) else object + class_mapping = np.empty(len(tmp), dtype=dtype) + class_mapping[:] = tmp + self.classes_, inverse = np.unique(class_mapping, return_inverse=True) + # ensure yt.indices keeps its current dtype + yt.indices = np.asarray(inverse[yt.indices], dtype=yt.indices.dtype) + + if not self.sparse_output: + yt = yt.toarray() + + return yt + + def transform(self, y): + """Transform the given label sets. 
+ + Parameters + ---------- + y : iterable of iterables + A set of labels (any orderable and hashable object) for each + sample. If the `classes` parameter is set, `y` will not be + iterated. + + Returns + ------- + y_indicator : array or CSR matrix, shape (n_samples, n_classes) + A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in + `y[i]`, and 0 otherwise. + """ + check_is_fitted(self) + + class_to_index = self._build_cache() + yt = self._transform(y, class_to_index) + + if not self.sparse_output: + yt = yt.toarray() + + return yt + + def _build_cache(self): + if self._cached_dict is None: + self._cached_dict = dict(zip(self.classes_, range(len(self.classes_)))) + + return self._cached_dict + + def _transform(self, y, class_mapping): + """Transforms the label sets with a given mapping. + + Parameters + ---------- + y : iterable of iterables + A set of labels (any orderable and hashable object) for each + sample. If the `classes` parameter is set, `y` will not be + iterated. + + class_mapping : Mapping + Maps from label to column index in label indicator matrix. + + Returns + ------- + y_indicator : sparse matrix of shape (n_samples, n_classes) + Label indicator matrix. Will be of CSR format. + """ + indices = array.array("i") + indptr = array.array("i", [0]) + unknown = set() + for labels in y: + index = set() + for label in labels: + try: + index.add(class_mapping[label]) + except KeyError: + unknown.add(label) + indices.extend(index) + indptr.append(len(indices)) + if unknown: + warnings.warn( + "unknown class(es) {0} will be ignored".format(sorted(unknown, key=str)) + ) + data = np.ones(len(indices), dtype=int) + + return sp.csr_matrix( + (data, indices, indptr), shape=(len(indptr) - 1, len(class_mapping)) + ) + + def inverse_transform(self, yt): + """Transform the given indicator matrix into label sets. + + Parameters + ---------- + yt : {ndarray, sparse matrix} of shape (n_samples, n_classes) + A matrix containing only 1s ands 0s. + + Returns + ------- + y : list of tuples + The set of labels for each sample such that `y[i]` consists of + `classes_[j]` for each `yt[i, j] == 1`. + """ + check_is_fitted(self) + + if yt.shape[1] != len(self.classes_): + raise ValueError( + "Expected indicator for {0} classes, but got {1}".format( + len(self.classes_), yt.shape[1] + ) + ) + + if sp.issparse(yt): + yt = yt.tocsr() + if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0: + raise ValueError("Expected only 0s and 1s in label indicator.") + return [ + tuple(self.classes_.take(yt.indices[start:end])) + for start, end in zip(yt.indptr[:-1], yt.indptr[1:]) + ] + else: + unexpected = np.setdiff1d(yt, [0, 1]) + if len(unexpected) > 0: + raise ValueError( + "Expected only 0s and 1s in label indicator. Also got {0}".format( + unexpected + ) + ) + return [tuple(self.classes_.compress(indicators)) for indicators in yt] + + def _more_tags(self): + return {"X_types": ["2dlabels"]} diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/_polynomial.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_polynomial.py new file mode 100644 index 0000000000000000000000000000000000000000..2512f411a5a9c20cb3c182b258b54e7e716496e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_polynomial.py @@ -0,0 +1,1172 @@ +""" +This file contains preprocessing tools based on polynomials. 
+""" +import collections +from itertools import chain, combinations +from itertools import combinations_with_replacement as combinations_w_r +from numbers import Integral + +import numpy as np +from scipy import sparse +from scipy.interpolate import BSpline +from scipy.special import comb + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils import check_array +from ..utils._param_validation import Interval, StrOptions +from ..utils.fixes import parse_version, sp_version +from ..utils.stats import _weighted_percentile +from ..utils.validation import ( + FLOAT_DTYPES, + _check_feature_names_in, + _check_sample_weight, + check_is_fitted, +) +from ._csr_polynomial_expansion import ( + _calc_expanded_nnz, + _calc_total_nnz, + _csr_polynomial_expansion, +) + +__all__ = [ + "PolynomialFeatures", + "SplineTransformer", +] + + +def _create_expansion(X, interaction_only, deg, n_features, cumulative_size=0): + """Helper function for creating and appending sparse expansion matrices""" + + total_nnz = _calc_total_nnz(X.indptr, interaction_only, deg) + expanded_col = _calc_expanded_nnz(n_features, interaction_only, deg) + + if expanded_col == 0: + return None + # This only checks whether each block needs 64bit integers upon + # expansion. We prefer to keep int32 indexing where we can, + # since currently SciPy's CSR construction downcasts when possible, + # so we prefer to avoid an unnecessary cast. The dtype may still + # change in the concatenation process if needed. + # See: https://github.com/scipy/scipy/issues/16569 + max_indices = expanded_col - 1 + max_indptr = total_nnz + max_int32 = np.iinfo(np.int32).max + needs_int64 = max(max_indices, max_indptr) > max_int32 + index_dtype = np.int64 if needs_int64 else np.int32 + + # This is a pretty specific bug that is hard to work around by a user, + # hence we do not detail the entire bug and all possible avoidance + # mechnasisms. Instead we recommend upgrading scipy or shrinking their data. + cumulative_size += expanded_col + if ( + sp_version < parse_version("1.8.0") + and cumulative_size - 1 > max_int32 + and not needs_int64 + ): + raise ValueError( + "In scipy versions `<1.8.0`, the function `scipy.sparse.hstack`" + " sometimes produces negative columns when the output shape contains" + " `n_cols` too large to be represented by a 32bit signed" + " integer. To avoid this error, either use a version" + " of scipy `>=1.8.0` or alter the `PolynomialFeatures`" + " transformer to produce fewer than 2^31 output features." + ) + + # Result of the expansion, modified in place by the + # `_csr_polynomial_expansion` routine. + expanded_data = np.empty(shape=total_nnz, dtype=X.data.dtype) + expanded_indices = np.empty(shape=total_nnz, dtype=index_dtype) + expanded_indptr = np.empty(shape=X.indptr.shape[0], dtype=index_dtype) + _csr_polynomial_expansion( + X.data, + X.indices, + X.indptr, + X.shape[1], + expanded_data, + expanded_indices, + expanded_indptr, + interaction_only, + deg, + ) + return sparse.csr_matrix( + (expanded_data, expanded_indices, expanded_indptr), + shape=(X.indptr.shape[0] - 1, expanded_col), + dtype=X.dtype, + ) + + +class PolynomialFeatures(TransformerMixin, BaseEstimator): + """Generate polynomial and interaction features. + + Generate a new feature matrix consisting of all polynomial combinations + of the features with degree less than or equal to the specified degree. + For example, if an input sample is two dimensional and of the form + [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2]. 
+ + Read more in the :ref:`User Guide `. + + Parameters + ---------- + degree : int or tuple (min_degree, max_degree), default=2 + If a single int is given, it specifies the maximal degree of the + polynomial features. If a tuple `(min_degree, max_degree)` is passed, + then `min_degree` is the minimum and `max_degree` is the maximum + polynomial degree of the generated features. Note that `min_degree=0` + and `min_degree=1` are equivalent as outputting the degree zero term is + determined by `include_bias`. + + interaction_only : bool, default=False + If `True`, only interaction features are produced: features that are + products of at most `degree` *distinct* input features, i.e. terms with + power of 2 or higher of the same input feature are excluded: + + - included: `x[0]`, `x[1]`, `x[0] * x[1]`, etc. + - excluded: `x[0] ** 2`, `x[0] ** 2 * x[1]`, etc. + + include_bias : bool, default=True + If `True` (default), then include a bias column, the feature in which + all polynomial powers are zero (i.e. a column of ones - acts as an + intercept term in a linear model). + + order : {'C', 'F'}, default='C' + Order of output array in the dense case. `'F'` order is faster to + compute, but may slow down subsequent estimators. + + .. versionadded:: 0.21 + + Attributes + ---------- + powers_ : ndarray of shape (`n_output_features_`, `n_features_in_`) + `powers_[i, j]` is the exponent of the jth input in the ith output. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_output_features_ : int + The total number of polynomial output features. The number of output + features is computed by iterating over all suitably sized combinations + of input features. + + See Also + -------- + SplineTransformer : Transformer that generates univariate B-spline bases + for features. + + Notes + ----- + Be aware that the number of features in the output array scales + polynomially in the number of features of the input array, and + exponentially in the degree. High degrees can cause overfitting. 
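A hedged sketch of the tuple form of `degree` described above: `degree=(2, 3)` keeps only the degree-2 and degree-3 terms, while the bias column remains governed by `include_bias`.

    import numpy as np
    from sklearn.preprocessing import PolynomialFeatures

    X = np.arange(6).reshape(3, 2)
    poly = PolynomialFeatures(degree=(2, 3), include_bias=False).fit(X)
    print(poly.get_feature_names_out())
    # expected: ['x0^2' 'x0 x1' 'x1^2' 'x0^3' 'x0^2 x1' 'x0 x1^2' 'x1^3']
    print(poly.n_output_features_)   # expected: 7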
+ + See :ref:`examples/linear_model/plot_polynomial_interpolation.py + ` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.preprocessing import PolynomialFeatures + >>> X = np.arange(6).reshape(3, 2) + >>> X + array([[0, 1], + [2, 3], + [4, 5]]) + >>> poly = PolynomialFeatures(2) + >>> poly.fit_transform(X) + array([[ 1., 0., 1., 0., 0., 1.], + [ 1., 2., 3., 4., 6., 9.], + [ 1., 4., 5., 16., 20., 25.]]) + >>> poly = PolynomialFeatures(interaction_only=True) + >>> poly.fit_transform(X) + array([[ 1., 0., 1., 0.], + [ 1., 2., 3., 6.], + [ 1., 4., 5., 20.]]) + """ + + _parameter_constraints: dict = { + "degree": [Interval(Integral, 0, None, closed="left"), "array-like"], + "interaction_only": ["boolean"], + "include_bias": ["boolean"], + "order": [StrOptions({"C", "F"})], + } + + def __init__( + self, degree=2, *, interaction_only=False, include_bias=True, order="C" + ): + self.degree = degree + self.interaction_only = interaction_only + self.include_bias = include_bias + self.order = order + + @staticmethod + def _combinations( + n_features, min_degree, max_degree, interaction_only, include_bias + ): + comb = combinations if interaction_only else combinations_w_r + start = max(1, min_degree) + iter = chain.from_iterable( + comb(range(n_features), i) for i in range(start, max_degree + 1) + ) + if include_bias: + iter = chain(comb(range(n_features), 0), iter) + return iter + + @staticmethod + def _num_combinations( + n_features, min_degree, max_degree, interaction_only, include_bias + ): + """Calculate number of terms in polynomial expansion + + This should be equivalent to counting the number of terms returned by + _combinations(...) but much faster. + """ + + if interaction_only: + combinations = sum( + [ + comb(n_features, i, exact=True) + for i in range(max(1, min_degree), min(max_degree, n_features) + 1) + ] + ) + else: + combinations = comb(n_features + max_degree, max_degree, exact=True) - 1 + if min_degree > 0: + d = min_degree - 1 + combinations -= comb(n_features + d, d, exact=True) - 1 + + if include_bias: + combinations += 1 + + return combinations + + @property + def powers_(self): + """Exponent for each of the inputs in the output.""" + check_is_fitted(self) + + combinations = self._combinations( + n_features=self.n_features_in_, + min_degree=self._min_degree, + max_degree=self._max_degree, + interaction_only=self.interaction_only, + include_bias=self.include_bias, + ) + return np.vstack( + [np.bincount(c, minlength=self.n_features_in_) for c in combinations] + ) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features is None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. 
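A hedged example with custom input names (the names are arbitrary placeholders chosen for illustration):

    import numpy as np
    from sklearn.preprocessing import PolynomialFeatures

    poly = PolynomialFeatures(degree=2, interaction_only=True).fit(np.zeros((1, 3)))
    print(poly.get_feature_names_out(["width", "height", "depth"]))
    # expected: ['1' 'width' 'height' 'depth' 'width height' 'width depth'
    #            'height depth']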
+ """ + powers = self.powers_ + input_features = _check_feature_names_in(self, input_features) + feature_names = [] + for row in powers: + inds = np.where(row)[0] + if len(inds): + name = " ".join( + ( + "%s^%d" % (input_features[ind], exp) + if exp != 1 + else input_features[ind] + ) + for ind, exp in zip(inds, row[inds]) + ) + else: + name = "1" + feature_names.append(name) + return np.asarray(feature_names, dtype=object) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """ + Compute number of output features. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Fitted transformer. + """ + _, n_features = self._validate_data(X, accept_sparse=True).shape + + if isinstance(self.degree, Integral): + if self.degree == 0 and not self.include_bias: + raise ValueError( + "Setting degree to zero and include_bias to False would result in" + " an empty output array." + ) + + self._min_degree = 0 + self._max_degree = self.degree + elif ( + isinstance(self.degree, collections.abc.Iterable) and len(self.degree) == 2 + ): + self._min_degree, self._max_degree = self.degree + if not ( + isinstance(self._min_degree, Integral) + and isinstance(self._max_degree, Integral) + and self._min_degree >= 0 + and self._min_degree <= self._max_degree + ): + raise ValueError( + "degree=(min_degree, max_degree) must " + "be non-negative integers that fulfil " + "min_degree <= max_degree, got " + f"{self.degree}." + ) + elif self._max_degree == 0 and not self.include_bias: + raise ValueError( + "Setting both min_degree and max_degree to zero and include_bias to" + " False would result in an empty output array." + ) + else: + raise ValueError( + "degree must be a non-negative int or tuple " + "(min_degree, max_degree), got " + f"{self.degree}." + ) + + self.n_output_features_ = self._num_combinations( + n_features=n_features, + min_degree=self._min_degree, + max_degree=self._max_degree, + interaction_only=self.interaction_only, + include_bias=self.include_bias, + ) + if self.n_output_features_ > np.iinfo(np.intp).max: + msg = ( + "The output that would result from the current configuration would" + f" have {self.n_output_features_} features which is too large to be" + f" indexed by {np.intp().dtype.name}. Please change some or all of the" + " following:\n- The number of features in the input, currently" + f" {n_features=}\n- The range of degrees to calculate, currently" + f" [{self._min_degree}, {self._max_degree}]\n- Whether to include only" + f" interaction terms, currently {self.interaction_only}\n- Whether to" + f" include a bias term, currently {self.include_bias}." + ) + if ( + np.intp == np.int32 + and self.n_output_features_ <= np.iinfo(np.int64).max + ): # pragma: nocover + msg += ( + "\nNote that the current Python runtime has a limited 32 bit " + "address space and that this configuration would have been " + "admissible if run on a 64 bit Python runtime." + ) + raise ValueError(msg) + # We also record the number of output features for + # _max_degree = 0 + self._n_out_full = self._num_combinations( + n_features=n_features, + min_degree=0, + max_degree=self._max_degree, + interaction_only=self.interaction_only, + include_bias=self.include_bias, + ) + + return self + + def transform(self, X): + """Transform data to polynomial features. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to transform, row by row. + + Prefer CSR over CSC for sparse input (for speed), but CSC is + required if the degree is 4 or higher. If the degree is less than + 4 and the input format is CSC, it will be converted to CSR, have + its polynomial features generated, then converted back to CSC. + + If the degree is 2 or 3, the method described in "Leveraging + Sparsity to Speed Up Polynomial Feature Expansions of CSR Matrices + Using K-Simplex Numbers" by Andrew Nystrom and John Hughes is + used, which is much faster than the method used on CSC input. For + this reason, a CSC input will be converted to CSR, and the output + will be converted back to CSC prior to being returned, hence the + preference of CSR. + + Returns + ------- + XP : {ndarray, sparse matrix} of shape (n_samples, NP) + The matrix of features, where `NP` is the number of polynomial + features generated from the combination of inputs. If a sparse + matrix is provided, it will be converted into a sparse + `csr_matrix`. + """ + check_is_fitted(self) + + X = self._validate_data( + X, order="F", dtype=FLOAT_DTYPES, reset=False, accept_sparse=("csr", "csc") + ) + + n_samples, n_features = X.shape + max_int32 = np.iinfo(np.int32).max + if sparse.issparse(X) and X.format == "csr": + if self._max_degree > 3: + return self.transform(X.tocsc()).tocsr() + to_stack = [] + if self.include_bias: + to_stack.append( + sparse.csr_matrix(np.ones(shape=(n_samples, 1), dtype=X.dtype)) + ) + if self._min_degree <= 1 and self._max_degree > 0: + to_stack.append(X) + + cumulative_size = sum(mat.shape[1] for mat in to_stack) + for deg in range(max(2, self._min_degree), self._max_degree + 1): + expanded = _create_expansion( + X=X, + interaction_only=self.interaction_only, + deg=deg, + n_features=n_features, + cumulative_size=cumulative_size, + ) + if expanded is not None: + to_stack.append(expanded) + cumulative_size += expanded.shape[1] + if len(to_stack) == 0: + # edge case: deal with empty matrix + XP = sparse.csr_matrix((n_samples, 0), dtype=X.dtype) + else: + # `scipy.sparse.hstack` breaks in scipy<1.9.2 + # when `n_output_features_ > max_int32` + all_int32 = all(mat.indices.dtype == np.int32 for mat in to_stack) + if ( + sp_version < parse_version("1.9.2") + and self.n_output_features_ > max_int32 + and all_int32 + ): + raise ValueError( # pragma: no cover + "In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`" + " produces negative columns when:\n1. The output shape contains" + " `n_cols` too large to be represented by a 32bit signed" + " integer.\n2. 
All sub-matrices to be stacked have indices of" + " dtype `np.int32`.\nTo avoid this error, either use a version" + " of scipy `>=1.9.2` or alter the `PolynomialFeatures`" + " transformer to produce fewer than 2^31 output features" + ) + XP = sparse.hstack(to_stack, dtype=X.dtype, format="csr") + elif sparse.issparse(X) and X.format == "csc" and self._max_degree < 4: + return self.transform(X.tocsr()).tocsc() + elif sparse.issparse(X): + combinations = self._combinations( + n_features=n_features, + min_degree=self._min_degree, + max_degree=self._max_degree, + interaction_only=self.interaction_only, + include_bias=self.include_bias, + ) + columns = [] + for combi in combinations: + if combi: + out_col = 1 + for col_idx in combi: + out_col = X[:, [col_idx]].multiply(out_col) + columns.append(out_col) + else: + bias = sparse.csc_matrix(np.ones((X.shape[0], 1))) + columns.append(bias) + XP = sparse.hstack(columns, dtype=X.dtype).tocsc() + else: + # Do as if _min_degree = 0 and cut down array after the + # computation, i.e. use _n_out_full instead of n_output_features_. + XP = np.empty( + shape=(n_samples, self._n_out_full), dtype=X.dtype, order=self.order + ) + + # What follows is a faster implementation of: + # for i, comb in enumerate(combinations): + # XP[:, i] = X[:, comb].prod(1) + # This implementation uses two optimisations. + # First one is broadcasting, + # multiply ([X1, ..., Xn], X1) -> [X1 X1, ..., Xn X1] + # multiply ([X2, ..., Xn], X2) -> [X2 X2, ..., Xn X2] + # ... + # multiply ([X[:, start:end], X[:, start]) -> ... + # Second optimisation happens for degrees >= 3. + # Xi^3 is computed reusing previous computation: + # Xi^3 = Xi^2 * Xi. + + # degree 0 term + if self.include_bias: + XP[:, 0] = 1 + current_col = 1 + else: + current_col = 0 + + if self._max_degree == 0: + return XP + + # degree 1 term + XP[:, current_col : current_col + n_features] = X + index = list(range(current_col, current_col + n_features)) + current_col += n_features + index.append(current_col) + + # loop over degree >= 2 terms + for _ in range(2, self._max_degree + 1): + new_index = [] + end = index[-1] + for feature_idx in range(n_features): + start = index[feature_idx] + new_index.append(current_col) + if self.interaction_only: + start += index[feature_idx + 1] - index[feature_idx] + next_col = current_col + end - start + if next_col <= current_col: + break + # XP[:, start:end] are terms of degree d - 1 + # that exclude feature #feature_idx. + np.multiply( + XP[:, start:end], + X[:, feature_idx : feature_idx + 1], + out=XP[:, current_col:next_col], + casting="no", + ) + current_col = next_col + + new_index.append(current_col) + index = new_index + + if self._min_degree > 1: + n_XP, n_Xout = self._n_out_full, self.n_output_features_ + if self.include_bias: + Xout = np.empty( + shape=(n_samples, n_Xout), dtype=XP.dtype, order=self.order + ) + Xout[:, 0] = 1 + Xout[:, 1:] = XP[:, n_XP - n_Xout + 1 :] + else: + Xout = XP[:, n_XP - n_Xout :].copy() + XP = Xout + return XP + + +class SplineTransformer(TransformerMixin, BaseEstimator): + """Generate univariate B-spline bases for features. + + Generate a new feature matrix consisting of + `n_splines=n_knots + degree - 1` (`n_knots - 1` for + `extrapolation="periodic"`) spline basis functions + (B-splines) of polynomial order=`degree` for each feature. + + In order to learn more about the SplineTransformer class go to: + :ref:`sphx_glr_auto_examples_applications_plot_cyclical_feature_engineering.py` + + Read more in the :ref:`User Guide `. + + .. 
versionadded:: 1.0 + + Parameters + ---------- + n_knots : int, default=5 + Number of knots of the splines if `knots` equals one of + {'uniform', 'quantile'}. Must be larger or equal 2. Ignored if `knots` + is array-like. + + degree : int, default=3 + The polynomial degree of the spline basis. Must be a non-negative + integer. + + knots : {'uniform', 'quantile'} or array-like of shape \ + (n_knots, n_features), default='uniform' + Set knot positions such that first knot <= features <= last knot. + + - If 'uniform', `n_knots` number of knots are distributed uniformly + from min to max values of the features. + - If 'quantile', they are distributed uniformly along the quantiles of + the features. + - If an array-like is given, it directly specifies the sorted knot + positions including the boundary knots. Note that, internally, + `degree` number of knots are added before the first knot, the same + after the last knot. + + extrapolation : {'error', 'constant', 'linear', 'continue', 'periodic'}, \ + default='constant' + If 'error', values outside the min and max values of the training + features raises a `ValueError`. If 'constant', the value of the + splines at minimum and maximum value of the features is used as + constant extrapolation. If 'linear', a linear extrapolation is used. + If 'continue', the splines are extrapolated as is, i.e. option + `extrapolate=True` in :class:`scipy.interpolate.BSpline`. If + 'periodic', periodic splines with a periodicity equal to the distance + between the first and last knot are used. Periodic splines enforce + equal function values and derivatives at the first and last knot. + For example, this makes it possible to avoid introducing an arbitrary + jump between Dec 31st and Jan 1st in spline features derived from a + naturally periodic "day-of-year" input feature. In this case it is + recommended to manually set the knot values to control the period. + + include_bias : bool, default=True + If False, then the last spline element inside the data range + of a feature is dropped. As B-splines sum to one over the spline basis + functions for each data point, they implicitly include a bias term, + i.e. a column of ones. It acts as an intercept term in a linear models. + + order : {'C', 'F'}, default='C' + Order of output array in the dense case. `'F'` order is faster to compute, but + may slow down subsequent estimators. + + sparse_output : bool, default=False + Will return sparse CSR matrix if set True else will return an array. This + option is only available with `scipy>=1.8`. + + .. versionadded:: 1.2 + + Attributes + ---------- + bsplines_ : list of shape (n_features,) + List of BSplines objects, one for each feature. + + n_features_in_ : int + The total number of input features. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_features_out_ : int + The total number of output features, which is computed as + `n_features * n_splines`, where `n_splines` is + the number of bases elements of the B-splines, + `n_knots + degree - 1` for non-periodic splines and + `n_knots - 1` for periodic ones. + If `include_bias=False`, then it is only + `n_features * (n_splines - 1)`. + + See Also + -------- + KBinsDiscretizer : Transformer that bins continuous data into intervals. + + PolynomialFeatures : Transformer that generates polynomial and interaction + features. 
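# NOTE (editor's illustrative sketch, not part of the vendored scikit-learn sources):
# periodic extrapolation with manually chosen knots, as the docstring above suggests
# for naturally cyclical inputs such as a day-of-year feature (period of 365 days).
import numpy as np
from sklearn.preprocessing import SplineTransformer

day_of_year = np.arange(1, 366).reshape(-1, 1)
spline = SplineTransformer(
    degree=3,
    knots=np.linspace(1, 366, 13).reshape(-1, 1),  # 12 equal segments spanning one period
    extrapolation="periodic",
    include_bias=False,
)
features = spline.fit_transform(day_of_year)
print(features.shape)  # (365, 11): n_knots - 1 = 12 periodic splines, minus one for include_bias=False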
+ + Notes + ----- + High degrees and a high number of knots can cause overfitting. + + See :ref:`examples/linear_model/plot_polynomial_interpolation.py + `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.preprocessing import SplineTransformer + >>> X = np.arange(6).reshape(6, 1) + >>> spline = SplineTransformer(degree=2, n_knots=3) + >>> spline.fit_transform(X) + array([[0.5 , 0.5 , 0. , 0. ], + [0.18, 0.74, 0.08, 0. ], + [0.02, 0.66, 0.32, 0. ], + [0. , 0.32, 0.66, 0.02], + [0. , 0.08, 0.74, 0.18], + [0. , 0. , 0.5 , 0.5 ]]) + """ + + _parameter_constraints: dict = { + "n_knots": [Interval(Integral, 2, None, closed="left")], + "degree": [Interval(Integral, 0, None, closed="left")], + "knots": [StrOptions({"uniform", "quantile"}), "array-like"], + "extrapolation": [ + StrOptions({"error", "constant", "linear", "continue", "periodic"}) + ], + "include_bias": ["boolean"], + "order": [StrOptions({"C", "F"})], + "sparse_output": ["boolean"], + } + + def __init__( + self, + n_knots=5, + degree=3, + *, + knots="uniform", + extrapolation="constant", + include_bias=True, + order="C", + sparse_output=False, + ): + self.n_knots = n_knots + self.degree = degree + self.knots = knots + self.extrapolation = extrapolation + self.include_bias = include_bias + self.order = order + self.sparse_output = sparse_output + + @staticmethod + def _get_base_knot_positions(X, n_knots=10, knots="uniform", sample_weight=None): + """Calculate base knot positions. + + Base knots such that first knot <= feature <= last knot. For the + B-spline construction with scipy.interpolate.BSpline, 2*degree knots + beyond the base interval are added. + + Returns + ------- + knots : ndarray of shape (n_knots, n_features), dtype=np.float64 + Knot positions (points) of base interval. + """ + if knots == "quantile": + percentiles = 100 * np.linspace( + start=0, stop=1, num=n_knots, dtype=np.float64 + ) + + if sample_weight is None: + knots = np.percentile(X, percentiles, axis=0) + else: + knots = np.array( + [ + _weighted_percentile(X, sample_weight, percentile) + for percentile in percentiles + ] + ) + + else: + # knots == 'uniform': + # Note that the variable `knots` has already been validated and + # `else` is therefore safe. + # Disregard observations with zero weight. + mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0 + x_min = np.amin(X[mask], axis=0) + x_max = np.amax(X[mask], axis=0) + + knots = np.linspace( + start=x_min, + stop=x_max, + num=n_knots, + endpoint=True, + dtype=np.float64, + ) + + return knots + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. 
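# NOTE (editor's illustrative sketch, not part of the vendored scikit-learn sources):
# the generated output column names follow the "<input feature>_sp_<spline index>"
# pattern documented above.
import numpy as np
from sklearn.preprocessing import SplineTransformer

X = np.arange(6).reshape(6, 1)
spline = SplineTransformer(degree=2, n_knots=3).fit(X)
print(spline.get_feature_names_out(["hour"]))
# ['hour_sp_0' 'hour_sp_1' 'hour_sp_2' 'hour_sp_3']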
+ """ + check_is_fitted(self, "n_features_in_") + n_splines = self.bsplines_[0].c.shape[1] + + input_features = _check_feature_names_in(self, input_features) + feature_names = [] + for i in range(self.n_features_in_): + for j in range(n_splines - 1 + self.include_bias): + feature_names.append(f"{input_features[i]}_sp_{j}") + return np.asarray(feature_names, dtype=object) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None, sample_weight=None): + """Compute knot positions of splines. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data. + + y : None + Ignored. + + sample_weight : array-like of shape (n_samples,), default = None + Individual weights for each sample. Used to calculate quantiles if + `knots="quantile"`. For `knots="uniform"`, zero weighted + observations are ignored for finding the min and max of `X`. + + Returns + ------- + self : object + Fitted transformer. + """ + X = self._validate_data( + X, + reset=True, + accept_sparse=False, + ensure_min_samples=2, + ensure_2d=True, + ) + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + _, n_features = X.shape + + if isinstance(self.knots, str): + base_knots = self._get_base_knot_positions( + X, n_knots=self.n_knots, knots=self.knots, sample_weight=sample_weight + ) + else: + base_knots = check_array(self.knots, dtype=np.float64) + if base_knots.shape[0] < 2: + raise ValueError("Number of knots, knots.shape[0], must be >= 2.") + elif base_knots.shape[1] != n_features: + raise ValueError("knots.shape[1] == n_features is violated.") + elif not np.all(np.diff(base_knots, axis=0) > 0): + raise ValueError("knots must be sorted without duplicates.") + + if self.sparse_output and sp_version < parse_version("1.8.0"): + raise ValueError( + "Option sparse_output=True is only available with scipy>=1.8.0, " + f"but here scipy=={sp_version} is used." + ) + + # number of knots for base interval + n_knots = base_knots.shape[0] + + if self.extrapolation == "periodic" and n_knots <= self.degree: + raise ValueError( + "Periodic splines require degree < n_knots. Got n_knots=" + f"{n_knots} and degree={self.degree}." + ) + + # number of splines basis functions + if self.extrapolation != "periodic": + n_splines = n_knots + self.degree - 1 + else: + # periodic splines have self.degree less degrees of freedom + n_splines = n_knots - 1 + + degree = self.degree + n_out = n_features * n_splines + # We have to add degree number of knots below, and degree number knots + # above the base knots in order to make the spline basis complete. + if self.extrapolation == "periodic": + # For periodic splines the spacing of the first / last degree knots + # needs to be a continuation of the spacing of the last / first + # base knots. + period = base_knots[-1] - base_knots[0] + knots = np.r_[ + base_knots[-(degree + 1) : -1] - period, + base_knots, + base_knots[1 : (degree + 1)] + period, + ] + + else: + # Eilers & Marx in "Flexible smoothing with B-splines and + # penalties" https://doi.org/10.1214/ss/1038425655 advice + # against repeating first and last knot several times, which + # would have inferior behaviour at boundaries if combined with + # a penalty (hence P-Spline). We follow this advice even if our + # splines are unpenalized. 
Meaning we do not: + # knots = np.r_[ + # np.tile(base_knots.min(axis=0), reps=[degree, 1]), + # base_knots, + # np.tile(base_knots.max(axis=0), reps=[degree, 1]) + # ] + # Instead, we reuse the distance of the 2 fist/last knots. + dist_min = base_knots[1] - base_knots[0] + dist_max = base_knots[-1] - base_knots[-2] + + knots = np.r_[ + np.linspace( + base_knots[0] - degree * dist_min, + base_knots[0] - dist_min, + num=degree, + ), + base_knots, + np.linspace( + base_knots[-1] + dist_max, + base_knots[-1] + degree * dist_max, + num=degree, + ), + ] + + # With a diagonal coefficient matrix, we get back the spline basis + # elements, i.e. the design matrix of the spline. + # Note, BSpline appreciates C-contiguous float64 arrays as c=coef. + coef = np.eye(n_splines, dtype=np.float64) + if self.extrapolation == "periodic": + coef = np.concatenate((coef, coef[:degree, :])) + + extrapolate = self.extrapolation in ["periodic", "continue"] + + bsplines = [ + BSpline.construct_fast( + knots[:, i], coef, self.degree, extrapolate=extrapolate + ) + for i in range(n_features) + ] + self.bsplines_ = bsplines + + self.n_features_out_ = n_out - n_features * (1 - self.include_bias) + return self + + def transform(self, X): + """Transform each feature data to B-splines. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to transform. + + Returns + ------- + XBS : {ndarray, sparse matrix} of shape (n_samples, n_features * n_splines) + The matrix of features, where n_splines is the number of bases + elements of the B-splines, n_knots + degree - 1. + """ + check_is_fitted(self) + + X = self._validate_data(X, reset=False, accept_sparse=False, ensure_2d=True) + + n_samples, n_features = X.shape + n_splines = self.bsplines_[0].c.shape[1] + degree = self.degree + + # TODO: Remove this condition, once scipy 1.10 is the minimum version. + # Only scipy => 1.10 supports design_matrix(.., extrapolate=..). + # The default (implicit in scipy < 1.10) is extrapolate=False. + scipy_1_10 = sp_version >= parse_version("1.10.0") + # Note: self.bsplines_[0].extrapolate is True for extrapolation in + # ["periodic", "continue"] + if scipy_1_10: + use_sparse = self.sparse_output + kwargs_extrapolate = {"extrapolate": self.bsplines_[0].extrapolate} + else: + use_sparse = self.sparse_output and not self.bsplines_[0].extrapolate + kwargs_extrapolate = dict() + + # Note that scipy BSpline returns float64 arrays and converts input + # x=X[:, i] to c-contiguous float64. + n_out = self.n_features_out_ + n_features * (1 - self.include_bias) + if X.dtype in FLOAT_DTYPES: + dtype = X.dtype + else: + dtype = np.float64 + if use_sparse: + output_list = [] + else: + XBS = np.zeros((n_samples, n_out), dtype=dtype, order=self.order) + + for i in range(n_features): + spl = self.bsplines_[i] + + if self.extrapolation in ("continue", "error", "periodic"): + if self.extrapolation == "periodic": + # With periodic extrapolation we map x to the segment + # [spl.t[k], spl.t[n]]. + # This is equivalent to BSpline(.., extrapolate="periodic") + # for scipy>=1.0.0. + n = spl.t.size - spl.k - 1 + # Assign to new array to avoid inplace operation + x = spl.t[spl.k] + (X[:, i] - spl.t[spl.k]) % ( + spl.t[n] - spl.t[spl.k] + ) + else: + x = X[:, i] + + if use_sparse: + XBS_sparse = BSpline.design_matrix( + x, spl.t, spl.k, **kwargs_extrapolate + ) + if self.extrapolation == "periodic": + # See the construction of coef in fit. 
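# NOTE (editor's illustrative check, not part of the vendored scikit-learn sources):
# with the identity coefficient matrix built above, each output row is the full
# B-spline basis evaluated at one sample, so with include_bias=True the rows sum
# to one inside the fitted data range (partition of unity).
import numpy as np
from sklearn.preprocessing import SplineTransformer

X = np.linspace(0.0, 10.0, 50).reshape(-1, 1)
XBS = SplineTransformer(degree=3, n_knots=5, include_bias=True).fit_transform(X)
np.testing.assert_allclose(XBS.sum(axis=1), 1.0)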
We need to add the last + # degree spline basis function to the first degree ones and + # then drop the last ones. + # Note: See comment about SparseEfficiencyWarning below. + XBS_sparse = XBS_sparse.tolil() + XBS_sparse[:, :degree] += XBS_sparse[:, -degree:] + XBS_sparse = XBS_sparse[:, :-degree] + else: + XBS[:, (i * n_splines) : ((i + 1) * n_splines)] = spl(x) + else: # extrapolation in ("constant", "linear") + xmin, xmax = spl.t[degree], spl.t[-degree - 1] + # spline values at boundaries + f_min, f_max = spl(xmin), spl(xmax) + mask = (xmin <= X[:, i]) & (X[:, i] <= xmax) + if use_sparse: + mask_inv = ~mask + x = X[:, i].copy() + # Set some arbitrary values outside boundary that will be reassigned + # later. + x[mask_inv] = spl.t[self.degree] + XBS_sparse = BSpline.design_matrix(x, spl.t, spl.k) + # Note: Without converting to lil_matrix we would get: + # scipy.sparse._base.SparseEfficiencyWarning: Changing the sparsity + # structure of a csr_matrix is expensive. lil_matrix is more + # efficient. + if np.any(mask_inv): + XBS_sparse = XBS_sparse.tolil() + XBS_sparse[mask_inv, :] = 0 + else: + XBS[mask, (i * n_splines) : ((i + 1) * n_splines)] = spl(X[mask, i]) + + # Note for extrapolation: + # 'continue' is already returned as is by scipy BSplines + if self.extrapolation == "error": + # BSpline with extrapolate=False does not raise an error, but + # outputs np.nan. + if (use_sparse and np.any(np.isnan(XBS_sparse.data))) or ( + not use_sparse + and np.any( + np.isnan(XBS[:, (i * n_splines) : ((i + 1) * n_splines)]) + ) + ): + raise ValueError( + "X contains values beyond the limits of the knots." + ) + elif self.extrapolation == "constant": + # Set all values beyond xmin and xmax to the value of the + # spline basis functions at those two positions. + # Only the first degree and last degree number of splines + # have non-zero values at the boundaries. + + mask = X[:, i] < xmin + if np.any(mask): + if use_sparse: + # Note: See comment about SparseEfficiencyWarning above. + XBS_sparse = XBS_sparse.tolil() + XBS_sparse[mask, :degree] = f_min[:degree] + + else: + XBS[mask, (i * n_splines) : (i * n_splines + degree)] = f_min[ + :degree + ] + + mask = X[:, i] > xmax + if np.any(mask): + if use_sparse: + # Note: See comment about SparseEfficiencyWarning above. + XBS_sparse = XBS_sparse.tolil() + XBS_sparse[mask, -degree:] = f_max[-degree:] + else: + XBS[ + mask, + ((i + 1) * n_splines - degree) : ((i + 1) * n_splines), + ] = f_max[-degree:] + + elif self.extrapolation == "linear": + # Continue the degree first and degree last spline bases + # linearly beyond the boundaries, with slope = derivative at + # the boundary. + # Note that all others have derivative = value = 0 at the + # boundaries. + + # spline derivatives = slopes at boundaries + fp_min, fp_max = spl(xmin, nu=1), spl(xmax, nu=1) + # Compute the linear continuation. + if degree <= 1: + # For degree=1, the derivative of 2nd spline is not zero at + # boundary. For degree=0 it is the same as 'constant'. + degree += 1 + for j in range(degree): + mask = X[:, i] < xmin + if np.any(mask): + linear_extr = f_min[j] + (X[mask, i] - xmin) * fp_min[j] + if use_sparse: + # Note: See comment about SparseEfficiencyWarning above. + XBS_sparse = XBS_sparse.tolil() + XBS_sparse[mask, j] = linear_extr + else: + XBS[mask, i * n_splines + j] = linear_extr + + mask = X[:, i] > xmax + if np.any(mask): + k = n_splines - 1 - j + linear_extr = f_max[k] + (X[mask, i] - xmax) * fp_max[k] + if use_sparse: + # Note: See comment about SparseEfficiencyWarning above. 
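# NOTE (editor's illustrative sketch, not part of the vendored scikit-learn sources):
# extrapolation="error" rejects transform-time values outside the fitted knot range,
# while the default "constant" clamps them to the boundary spline values.
import numpy as np
from sklearn.preprocessing import SplineTransformer

X_train = np.linspace(0.0, 1.0, 20).reshape(-1, 1)
spline = SplineTransformer(extrapolation="error").fit(X_train)
try:
    spline.transform(np.array([[2.0]]))  # beyond the last base knot
except ValueError as exc:
    print(exc)  # X contains values beyond the limits of the knots.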
+ XBS_sparse = XBS_sparse.tolil() + XBS_sparse[mask, k : k + 1] = linear_extr[:, None] + else: + XBS[mask, i * n_splines + k] = linear_extr + + if use_sparse: + XBS_sparse = XBS_sparse.tocsr() + output_list.append(XBS_sparse) + + if use_sparse: + # TODO: Remove this conditional error when the minimum supported version of + # SciPy is 1.9.2 + # `scipy.sparse.hstack` breaks in scipy<1.9.2 + # when `n_features_out_ > max_int32` + max_int32 = np.iinfo(np.int32).max + all_int32 = True + for mat in output_list: + all_int32 &= mat.indices.dtype == np.int32 + if ( + sp_version < parse_version("1.9.2") + and self.n_features_out_ > max_int32 + and all_int32 + ): + raise ValueError( + "In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`" + " produces negative columns when:\n1. The output shape contains" + " `n_cols` too large to be represented by a 32bit signed" + " integer.\n. All sub-matrices to be stacked have indices of" + " dtype `np.int32`.\nTo avoid this error, either use a version" + " of scipy `>=1.9.2` or alter the `SplineTransformer`" + " transformer to produce fewer than 2^31 output features" + ) + XBS = sparse.hstack(output_list, format="csr") + elif self.sparse_output: + # TODO: Remove ones scipy 1.10 is the minimum version. See comments above. + XBS = sparse.csr_matrix(XBS) + + if self.include_bias: + return XBS + else: + # We throw away one spline basis per feature. + # We chose the last one. + indices = [j for j in range(XBS.shape[1]) if (j + 1) % n_splines != 0] + return XBS[:, indices] + + def _more_tags(self): + return { + "_xfail_checks": { + "check_estimators_pickle": ( + "Current Scipy implementation of _bsplines does not" + "support const memory views." + ), + } + } diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/_target_encoder.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_target_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..b3b7c3d5e7bd911153d9e9724c05cc673c9f3cfd --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_target_encoder.py @@ -0,0 +1,531 @@ +from numbers import Integral, Real + +import numpy as np + +from ..base import OneToOneFeatureMixin, _fit_context +from ..utils._param_validation import Interval, StrOptions +from ..utils.multiclass import type_of_target +from ..utils.validation import ( + _check_feature_names_in, + _check_y, + check_consistent_length, + check_is_fitted, +) +from ._encoders import _BaseEncoder +from ._target_encoder_fast import _fit_encoding_fast, _fit_encoding_fast_auto_smooth + + +class TargetEncoder(OneToOneFeatureMixin, _BaseEncoder): + """Target Encoder for regression and classification targets. + + Each category is encoded based on a shrunk estimate of the average target + values for observations belonging to the category. The encoding scheme mixes + the global target mean with the target mean conditioned on the value of the + category (see [MIC]_). + + When the target type is "multiclass", encodings are based + on the conditional probability estimate for each class. The target is first + binarized using the "one-vs-all" scheme via + :class:`~sklearn.preprocessing.LabelBinarizer`, then the average target + value for each class and each category is used for encoding, resulting in + `n_features` * `n_classes` encoded output features. + + :class:`TargetEncoder` considers missing values, such as `np.nan` or `None`, + as another category and encodes them like any other category. 
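# NOTE (editor's illustrative sketch, not part of the vendored scikit-learn sources):
# missing values (np.nan) are learned as a category of their own, while a category
# unseen during fit falls back to the global target mean, as described above.
import numpy as np
from sklearn.preprocessing import TargetEncoder

X = np.array([["a"], ["a"], ["b"], [np.nan]], dtype=object)
y = [10.0, 12.0, 30.0, 50.0]
enc = TargetEncoder(smooth=0.0).fit(X, y)             # smooth=0: plain per-category means
print(enc.target_mean_)                                # 25.5
print(enc.transform(np.array([["c"]], dtype=object)))  # unseen category -> [[25.5]]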
Categories + that are not seen during :meth:`fit` are encoded with the target mean, i.e. + `target_mean_`. + + For a demo on the importance of the `TargetEncoder` internal cross-fitting, + see + :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder_cross_val.py`. + For a comparison of different encoders, refer to + :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder.py`. Read + more in the :ref:`User Guide `. + + .. note:: + `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a + :term:`cross fitting` scheme is used in `fit_transform` for encoding. + See the :ref:`User Guide ` for details. + + .. versionadded:: 1.3 + + Parameters + ---------- + categories : "auto" or list of shape (n_features,) of array-like, default="auto" + Categories (unique values) per feature: + + - `"auto"` : Determine categories automatically from the training data. + - list : `categories[i]` holds the categories expected in the i-th column. The + passed categories should not mix strings and numeric values within a single + feature, and should be sorted in case of numeric values. + + The used categories are stored in the `categories_` fitted attribute. + + target_type : {"auto", "continuous", "binary", "multiclass"}, default="auto" + Type of target. + + - `"auto"` : Type of target is inferred with + :func:`~sklearn.utils.multiclass.type_of_target`. + - `"continuous"` : Continuous target + - `"binary"` : Binary target + - `"multiclass"` : Multiclass target + + .. note:: + The type of target inferred with `"auto"` may not be the desired target + type used for modeling. For example, if the target consisted of integers + between 0 and 100, then :func:`~sklearn.utils.multiclass.type_of_target` + will infer the target as `"multiclass"`. In this case, setting + `target_type="continuous"` will specify the target as a regression + problem. The `target_type_` attribute gives the target type used by the + encoder. + + .. versionchanged:: 1.4 + Added the option 'multiclass'. + + smooth : "auto" or float, default="auto" + The amount of mixing of the target mean conditioned on the value of the + category with the global target mean. A larger `smooth` value will put + more weight on the global target mean. + If `"auto"`, then `smooth` is set to an empirical Bayes estimate. + + cv : int, default=5 + Determines the number of folds in the :term:`cross fitting` strategy used in + :meth:`fit_transform`. For classification targets, `StratifiedKFold` is used + and for continuous targets, `KFold` is used. + + shuffle : bool, default=True + Whether to shuffle the data in :meth:`fit_transform` before splitting into + folds. Note that the samples within each split will not be shuffled. + + random_state : int, RandomState instance or None, default=None + When `shuffle` is True, `random_state` affects the ordering of the + indices, which controls the randomness of each fold. Otherwise, this + parameter has no effect. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + encodings_ : list of shape (n_features,) or (n_features * n_classes) of \ + ndarray + Encodings learnt on all of `X`. + For feature `i`, `encodings_[i]` are the encodings matching the + categories listed in `categories_[i]`. When `target_type_` is + "multiclass", the encoding for feature `i` and class `j` is stored in + `encodings_[j + (i * len(classes_))]`. 
E.g., for 2 features (f) and + 3 classes (c), encodings are ordered: + f0_c0, f0_c1, f0_c2, f1_c0, f1_c1, f1_c2, + + categories_ : list of shape (n_features,) of ndarray + The categories of each input feature determined during fitting or + specified in `categories` + (in order of the features in `X` and corresponding with the output + of :meth:`transform`). + + target_type_ : str + Type of target. + + target_mean_ : float + The overall mean of the target. This value is only used in :meth:`transform` + to encode categories. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + classes_ : ndarray or None + If `target_type_` is 'binary' or 'multiclass', holds the label for each class, + otherwise `None`. + + See Also + -------- + OrdinalEncoder : Performs an ordinal (integer) encoding of the categorical features. + Contrary to TargetEncoder, this encoding is not supervised. Treating the + resulting encoding as a numerical features therefore lead arbitrarily + ordered values and therefore typically lead to lower predictive performance + when used as preprocessing for a classifier or regressor. + OneHotEncoder : Performs a one-hot encoding of categorical features. This + unsupervised encoding is better suited for low cardinality categorical + variables as it generate one new feature per unique category. + + References + ---------- + .. [MIC] :doi:`Micci-Barreca, Daniele. "A preprocessing scheme for high-cardinality + categorical attributes in classification and prediction problems" + SIGKDD Explor. Newsl. 3, 1 (July 2001), 27–32. <10.1145/507533.507538>` + + Examples + -------- + With `smooth="auto"`, the smoothing parameter is set to an empirical Bayes estimate: + + >>> import numpy as np + >>> from sklearn.preprocessing import TargetEncoder + >>> X = np.array([["dog"] * 20 + ["cat"] * 30 + ["snake"] * 38], dtype=object).T + >>> y = [90.3] * 5 + [80.1] * 15 + [20.4] * 5 + [20.1] * 25 + [21.2] * 8 + [49] * 30 + >>> enc_auto = TargetEncoder(smooth="auto") + >>> X_trans = enc_auto.fit_transform(X, y) + + >>> # A high `smooth` parameter puts more weight on global mean on the categorical + >>> # encodings: + >>> enc_high_smooth = TargetEncoder(smooth=5000.0).fit(X, y) + >>> enc_high_smooth.target_mean_ + 44... + >>> enc_high_smooth.encodings_ + [array([44..., 44..., 44...])] + + >>> # On the other hand, a low `smooth` parameter puts more weight on target + >>> # conditioned on the value of the categorical: + >>> enc_low_smooth = TargetEncoder(smooth=1.0).fit(X, y) + >>> enc_low_smooth.encodings_ + [array([20..., 80..., 43...])] + """ + + _parameter_constraints: dict = { + "categories": [StrOptions({"auto"}), list], + "target_type": [StrOptions({"auto", "continuous", "binary", "multiclass"})], + "smooth": [StrOptions({"auto"}), Interval(Real, 0, None, closed="left")], + "cv": [Interval(Integral, 2, None, closed="left")], + "shuffle": ["boolean"], + "random_state": ["random_state"], + } + + def __init__( + self, + categories="auto", + target_type="auto", + smooth="auto", + cv=5, + shuffle=True, + random_state=None, + ): + self.categories = categories + self.smooth = smooth + self.target_type = target_type + self.cv = cv + self.shuffle = shuffle + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit the :class:`TargetEncoder` to X and y. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to determine the categories of each feature. + + y : array-like of shape (n_samples,) + The target data used to encode the categories. + + Returns + ------- + self : object + Fitted encoder. + """ + self._fit_encodings_all(X, y) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y): + """Fit :class:`TargetEncoder` and transform X with the target encoding. + + .. note:: + `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a + :term:`cross fitting` scheme is used in `fit_transform` for encoding. + See the :ref:`User Guide `. for details. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to determine the categories of each feature. + + y : array-like of shape (n_samples,) + The target data used to encode the categories. + + Returns + ------- + X_trans : ndarray of shape (n_samples, n_features) or \ + (n_samples, (n_features * n_classes)) + Transformed input. + """ + from ..model_selection import KFold, StratifiedKFold # avoid circular import + + X_ordinal, X_known_mask, y_encoded, n_categories = self._fit_encodings_all(X, y) + + # The cv splitter is voluntarily restricted to *KFold to enforce non + # overlapping validation folds, otherwise the fit_transform output will + # not be well-specified. + if self.target_type_ == "continuous": + cv = KFold(self.cv, shuffle=self.shuffle, random_state=self.random_state) + else: + cv = StratifiedKFold( + self.cv, shuffle=self.shuffle, random_state=self.random_state + ) + + # If 'multiclass' multiply axis=1 by num classes else keep shape the same + if self.target_type_ == "multiclass": + X_out = np.empty( + (X_ordinal.shape[0], X_ordinal.shape[1] * len(self.classes_)), + dtype=np.float64, + ) + else: + X_out = np.empty_like(X_ordinal, dtype=np.float64) + + for train_idx, test_idx in cv.split(X, y): + X_train, y_train = X_ordinal[train_idx, :], y_encoded[train_idx] + y_train_mean = np.mean(y_train, axis=0) + + if self.target_type_ == "multiclass": + encodings = self._fit_encoding_multiclass( + X_train, + y_train, + n_categories, + y_train_mean, + ) + else: + encodings = self._fit_encoding_binary_or_continuous( + X_train, + y_train, + n_categories, + y_train_mean, + ) + self._transform_X_ordinal( + X_out, + X_ordinal, + ~X_known_mask, + test_idx, + encodings, + y_train_mean, + ) + return X_out + + def transform(self, X): + """Transform X with the target encoding. + + .. note:: + `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a + :term:`cross fitting` scheme is used in `fit_transform` for encoding. + See the :ref:`User Guide `. for details. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to determine the categories of each feature. + + Returns + ------- + X_trans : ndarray of shape (n_samples, n_features) or \ + (n_samples, (n_features * n_classes)) + Transformed input. 
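# NOTE (editor's illustrative sketch, not part of the vendored scikit-learn sources):
# because fit_transform uses the cross-fitting scheme above, its output will
# generally differ from fit(X, y).transform(X) on the same data.
import numpy as np
from sklearn.preprocessing import TargetEncoder

rng = np.random.RandomState(0)
X = rng.choice(["a", "b", "c"], size=(60, 1)).astype(object)
y = rng.normal(size=60)

enc = TargetEncoder(cv=5, shuffle=True, random_state=0)
X_cross_fitted = enc.fit_transform(X, y)     # per-fold encodings
X_refit = enc.transform(X)                   # encodings learnt on all of X and y
print(np.allclose(X_cross_fitted, X_refit))  # typically False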
+ """ + X_ordinal, X_known_mask = self._transform( + X, handle_unknown="ignore", force_all_finite="allow-nan" + ) + + # If 'multiclass' multiply axis=1 by num of classes else keep shape the same + if self.target_type_ == "multiclass": + X_out = np.empty( + (X_ordinal.shape[0], X_ordinal.shape[1] * len(self.classes_)), + dtype=np.float64, + ) + else: + X_out = np.empty_like(X_ordinal, dtype=np.float64) + + self._transform_X_ordinal( + X_out, + X_ordinal, + ~X_known_mask, + slice(None), + self.encodings_, + self.target_mean_, + ) + return X_out + + def _fit_encodings_all(self, X, y): + """Fit a target encoding with all the data.""" + # avoid circular import + from ..preprocessing import ( + LabelBinarizer, + LabelEncoder, + ) + + check_consistent_length(X, y) + self._fit(X, handle_unknown="ignore", force_all_finite="allow-nan") + + if self.target_type == "auto": + accepted_target_types = ("binary", "multiclass", "continuous") + inferred_type_of_target = type_of_target(y, input_name="y") + if inferred_type_of_target not in accepted_target_types: + raise ValueError( + "Unknown label type: Target type was inferred to be " + f"{inferred_type_of_target!r}. Only {accepted_target_types} are " + "supported." + ) + self.target_type_ = inferred_type_of_target + else: + self.target_type_ = self.target_type + + self.classes_ = None + if self.target_type_ == "binary": + label_encoder = LabelEncoder() + y = label_encoder.fit_transform(y) + self.classes_ = label_encoder.classes_ + elif self.target_type_ == "multiclass": + label_binarizer = LabelBinarizer() + y = label_binarizer.fit_transform(y) + self.classes_ = label_binarizer.classes_ + else: # continuous + y = _check_y(y, y_numeric=True, estimator=self) + + self.target_mean_ = np.mean(y, axis=0) + + X_ordinal, X_known_mask = self._transform( + X, handle_unknown="ignore", force_all_finite="allow-nan" + ) + n_categories = np.fromiter( + (len(category_for_feature) for category_for_feature in self.categories_), + dtype=np.int64, + count=len(self.categories_), + ) + if self.target_type_ == "multiclass": + encodings = self._fit_encoding_multiclass( + X_ordinal, + y, + n_categories, + self.target_mean_, + ) + else: + encodings = self._fit_encoding_binary_or_continuous( + X_ordinal, + y, + n_categories, + self.target_mean_, + ) + self.encodings_ = encodings + + return X_ordinal, X_known_mask, y, n_categories + + def _fit_encoding_binary_or_continuous( + self, X_ordinal, y, n_categories, target_mean + ): + """Learn target encodings.""" + if self.smooth == "auto": + y_variance = np.var(y) + encodings = _fit_encoding_fast_auto_smooth( + X_ordinal, + y, + n_categories, + target_mean, + y_variance, + ) + else: + encodings = _fit_encoding_fast( + X_ordinal, + y, + n_categories, + self.smooth, + target_mean, + ) + return encodings + + def _fit_encoding_multiclass(self, X_ordinal, y, n_categories, target_mean): + """Learn multiclass encodings. + + Learn encodings for each class (c) then reorder encodings such that + the same features (f) are grouped together. 
`reorder_index` enables + converting from: + f0_c0, f1_c0, f0_c1, f1_c1, f0_c2, f1_c2 + to: + f0_c0, f0_c1, f0_c2, f1_c0, f1_c1, f1_c2 + """ + n_features = self.n_features_in_ + n_classes = len(self.classes_) + + encodings = [] + for i in range(n_classes): + y_class = y[:, i] + encoding = self._fit_encoding_binary_or_continuous( + X_ordinal, + y_class, + n_categories, + target_mean[i], + ) + encodings.extend(encoding) + + reorder_index = ( + idx + for start in range(n_features) + for idx in range(start, (n_classes * n_features), n_features) + ) + return [encodings[idx] for idx in reorder_index] + + def _transform_X_ordinal( + self, + X_out, + X_ordinal, + X_unknown_mask, + row_indices, + encodings, + target_mean, + ): + """Transform X_ordinal using encodings. + + In the multiclass case, `X_ordinal` and `X_unknown_mask` have column + (axis=1) size `n_features`, while `encodings` has length of size + `n_features * n_classes`. `feat_idx` deals with this by repeating + feature indices by `n_classes` E.g., for 3 features, 2 classes: + 0,0,1,1,2,2 + + Additionally, `target_mean` is of shape (`n_classes`,) so `mean_idx` + cycles through 0 to `n_classes` - 1, `n_features` times. + """ + if self.target_type_ == "multiclass": + n_classes = len(self.classes_) + for e_idx, encoding in enumerate(encodings): + # Repeat feature indices by n_classes + feat_idx = e_idx // n_classes + # Cycle through each class + mean_idx = e_idx % n_classes + X_out[row_indices, e_idx] = encoding[X_ordinal[row_indices, feat_idx]] + X_out[X_unknown_mask[:, feat_idx], e_idx] = target_mean[mean_idx] + else: + for e_idx, encoding in enumerate(encodings): + X_out[row_indices, e_idx] = encoding[X_ordinal[row_indices, e_idx]] + X_out[X_unknown_mask[:, e_idx], e_idx] = target_mean + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Not used, present here for API consistency by convention. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. `feature_names_in_` is used unless it is + not defined, in which case the following input feature names are + generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + When `type_of_target_` is "multiclass" the names are of the format + '_'. 
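# NOTE (editor's illustrative sketch, not part of the vendored scikit-learn sources):
# for a multiclass target, each input feature expands into n_classes columns and the
# output names follow the "<feature name>_<class name>" pattern described above.
import numpy as np
from sklearn.preprocessing import TargetEncoder

X = np.array([["a"], ["b"], ["a"], ["b"], ["a"], ["b"]], dtype=object)
y = ["red", "green", "blue", "red", "green", "blue"]
enc = TargetEncoder(target_type="multiclass").fit(X, y)
print(len(enc.encodings_))          # 3 = n_features * n_classes
print(enc.get_feature_names_out())
# ['x0_blue' 'x0_green' 'x0_red']   (classes_ are sorted by LabelBinarizer)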
+ """ + check_is_fitted(self, "n_features_in_") + feature_names = _check_feature_names_in(self, input_features) + if self.target_type_ == "multiclass": + feature_names = [ + f"{feature_name}_{class_name}" + for feature_name in feature_names + for class_name in self.classes_ + ] + return np.asarray(feature_names, dtype=object) + else: + return feature_names + + def _more_tags(self): + return { + "requires_y": True, + } diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/_target_encoder_fast.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_target_encoder_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..13734c9ef32d972fbf5777bede89232bf5a5d141 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/_target_encoder_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f99d3303865ebff3b9553a1653e6d80ca733dafe Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_data.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_data.py new file mode 100644 index 0000000000000000000000000000000000000000..24d8ab2a36c3ac5d98e8b0ac373cc185b48eb810 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_data.py @@ -0,0 +1,2593 @@ +# Authors: +# +# Giorgio Patrini +# +# License: BSD 3 clause + +import re +import warnings + +import numpy as np +import numpy.linalg as la +import pytest +from scipy import sparse, stats + +from sklearn import datasets +from sklearn.base import clone +from sklearn.exceptions import NotFittedError +from sklearn.metrics.pairwise import linear_kernel +from sklearn.model_selection import cross_val_predict +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import ( + Binarizer, + KernelCenterer, + MaxAbsScaler, + MinMaxScaler, + Normalizer, + PowerTransformer, + QuantileTransformer, + RobustScaler, + StandardScaler, + add_dummy_feature, + maxabs_scale, + minmax_scale, + normalize, + power_transform, + quantile_transform, + robust_scale, + scale, +) +from sklearn.preprocessing._data import BOUNDS_THRESHOLD, _handle_zeros_in_scale +from sklearn.svm import SVR +from sklearn.utils import gen_batches, shuffle +from sklearn.utils._array_api import ( + yield_namespace_device_dtype_combinations, +) +from sklearn.utils._testing import ( + _convert_container, + assert_allclose, + assert_allclose_dense_sparse, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_array_less, + skip_if_32bit, +) +from sklearn.utils.estimator_checks import ( + _get_check_estimator_ids, + check_array_api_input_and_values, +) +from sklearn.utils.fixes import ( + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + LIL_CONTAINERS, +) +from sklearn.utils.sparsefuncs import mean_variance_axis + +iris = datasets.load_iris() + +# Make some data to be used many times +rng = np.random.RandomState(0) +n_features = 30 +n_samples = 1000 +offsets = rng.uniform(-1, 1, size=n_features) +scales = rng.uniform(1, 10, size=n_features) +X_2d = rng.randn(n_samples, 
n_features) * scales + offsets +X_1row = X_2d[0, :].reshape(1, n_features) +X_1col = X_2d[:, 0].reshape(n_samples, 1) +X_list_1row = X_1row.tolist() +X_list_1col = X_1col.tolist() + + +def toarray(a): + if hasattr(a, "toarray"): + a = a.toarray() + return a + + +def _check_dim_1axis(a): + return np.asarray(a).shape[0] + + +def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size, n_samples_seen): + if batch_stop != n: + assert (i + 1) * chunk_size == n_samples_seen + else: + assert i * chunk_size + (batch_stop - batch_start) == n_samples_seen + + +def test_raises_value_error_if_sample_weights_greater_than_1d(): + # Sample weights must be either scalar or 1D + + n_sampless = [2, 3] + n_featuress = [3, 2] + + for n_samples, n_features in zip(n_sampless, n_featuress): + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples) + + scaler = StandardScaler() + + # make sure Error is raised the sample weights greater than 1d + sample_weight_notOK = rng.randn(n_samples, 1) ** 2 + with pytest.raises(ValueError): + scaler.fit(X, y, sample_weight=sample_weight_notOK) + + +@pytest.mark.parametrize( + ["Xw", "X", "sample_weight"], + [ + ([[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [1, 2, 3], [4, 5, 6]], [2.0, 1.0]), + ( + [[1, 0, 1], [0, 0, 1]], + [[1, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]], + np.array([1, 3]), + ), + ( + [[1, np.nan, 1], [np.nan, np.nan, 1]], + [ + [1, np.nan, 1], + [np.nan, np.nan, 1], + [np.nan, np.nan, 1], + [np.nan, np.nan, 1], + ], + np.array([1, 3]), + ), + ], +) +@pytest.mark.parametrize("array_constructor", ["array", "sparse_csr", "sparse_csc"]) +def test_standard_scaler_sample_weight(Xw, X, sample_weight, array_constructor): + with_mean = not array_constructor.startswith("sparse") + X = _convert_container(X, array_constructor) + Xw = _convert_container(Xw, array_constructor) + + # weighted StandardScaler + yw = np.ones(Xw.shape[0]) + scaler_w = StandardScaler(with_mean=with_mean) + scaler_w.fit(Xw, yw, sample_weight=sample_weight) + + # unweighted, but with repeated samples + y = np.ones(X.shape[0]) + scaler = StandardScaler(with_mean=with_mean) + scaler.fit(X, y) + + X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]] + + assert_almost_equal(scaler.mean_, scaler_w.mean_) + assert_almost_equal(scaler.var_, scaler_w.var_) + assert_almost_equal(scaler.transform(X_test), scaler_w.transform(X_test)) + + +def test_standard_scaler_1d(): + # Test scaling of dataset along single axis + for X in [X_1row, X_1col, X_list_1row, X_list_1row]: + scaler = StandardScaler() + X_scaled = scaler.fit(X).transform(X, copy=True) + + if isinstance(X, list): + X = np.array(X) # cast only after scaling done + + if _check_dim_1axis(X) == 1: + assert_almost_equal(scaler.mean_, X.ravel()) + assert_almost_equal(scaler.scale_, np.ones(n_features)) + assert_array_almost_equal(X_scaled.mean(axis=0), np.zeros_like(n_features)) + assert_array_almost_equal(X_scaled.std(axis=0), np.zeros_like(n_features)) + else: + assert_almost_equal(scaler.mean_, X.mean()) + assert_almost_equal(scaler.scale_, X.std()) + assert_array_almost_equal(X_scaled.mean(axis=0), np.zeros_like(n_features)) + assert_array_almost_equal(X_scaled.mean(axis=0), 0.0) + assert_array_almost_equal(X_scaled.std(axis=0), 1.0) + assert scaler.n_samples_seen_ == X.shape[0] + + # check inverse transform + X_scaled_back = scaler.inverse_transform(X_scaled) + assert_array_almost_equal(X_scaled_back, X) + + # Constant feature + X = np.ones((5, 1)) + scaler = StandardScaler() + X_scaled = scaler.fit(X).transform(X, copy=True) + 
assert_almost_equal(scaler.mean_, 1.0) + assert_almost_equal(scaler.scale_, 1.0) + assert_array_almost_equal(X_scaled.mean(axis=0), 0.0) + assert_array_almost_equal(X_scaled.std(axis=0), 0.0) + assert scaler.n_samples_seen_ == X.shape[0] + + +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS) +@pytest.mark.parametrize("add_sample_weight", [False, True]) +def test_standard_scaler_dtype(add_sample_weight, sparse_container): + # Ensure scaling does not affect dtype + rng = np.random.RandomState(0) + n_samples = 10 + n_features = 3 + if add_sample_weight: + sample_weight = np.ones(n_samples) + else: + sample_weight = None + with_mean = True + if sparse_container is not None: + # scipy sparse containers do not support float16, see + # https://github.com/scipy/scipy/issues/7408 for more details. + supported_dtype = [np.float64, np.float32] + else: + supported_dtype = [np.float64, np.float32, np.float16] + for dtype in supported_dtype: + X = rng.randn(n_samples, n_features).astype(dtype) + if sparse_container is not None: + X = sparse_container(X) + with_mean = False + + scaler = StandardScaler(with_mean=with_mean) + X_scaled = scaler.fit(X, sample_weight=sample_weight).transform(X) + assert X.dtype == X_scaled.dtype + assert scaler.mean_.dtype == np.float64 + assert scaler.scale_.dtype == np.float64 + + +@pytest.mark.parametrize( + "scaler", + [ + StandardScaler(with_mean=False), + RobustScaler(with_centering=False), + ], +) +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS) +@pytest.mark.parametrize("add_sample_weight", [False, True]) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +@pytest.mark.parametrize("constant", [0, 1.0, 100.0]) +def test_standard_scaler_constant_features( + scaler, add_sample_weight, sparse_container, dtype, constant +): + if isinstance(scaler, RobustScaler) and add_sample_weight: + pytest.skip(f"{scaler.__class__.__name__} does not yet support sample_weight") + + rng = np.random.RandomState(0) + n_samples = 100 + n_features = 1 + if add_sample_weight: + fit_params = dict(sample_weight=rng.uniform(size=n_samples) * 2) + else: + fit_params = {} + X_array = np.full(shape=(n_samples, n_features), fill_value=constant, dtype=dtype) + X = X_array if sparse_container is None else sparse_container(X_array) + X_scaled = scaler.fit(X, **fit_params).transform(X) + + if isinstance(scaler, StandardScaler): + # The variance info should be close to zero for constant features. + assert_allclose(scaler.var_, np.zeros(X.shape[1]), atol=1e-7) + + # Constant features should not be scaled (scale of 1.): + assert_allclose(scaler.scale_, np.ones(X.shape[1])) + + assert X_scaled is not X # make sure we make a copy + assert_allclose_dense_sparse(X_scaled, X) + + if isinstance(scaler, StandardScaler) and not add_sample_weight: + # Also check consistency with the standard scale function. + X_scaled_2 = scale(X, with_mean=scaler.with_mean) + assert X_scaled_2 is not X # make sure we did a copy + assert_allclose_dense_sparse(X_scaled_2, X) + + +@pytest.mark.parametrize("n_samples", [10, 100, 10_000]) +@pytest.mark.parametrize("average", [1e-10, 1, 1e10]) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS) +def test_standard_scaler_near_constant_features( + n_samples, sparse_container, average, dtype +): + # Check that when the variance is too small (var << mean**2) the feature + # is considered constant and not scaled. 
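# NOTE (editor's illustrative aside, not part of the vendored test module):
# a constant column gets scale_ == 1.0 so it is passed through unchanged instead
# of being divided by a value that is numerically zero.
import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.c_[np.full(10, 3.0), np.arange(10.0)]
scaler = StandardScaler().fit(X)
print(scaler.scale_)              # [1.         2.87228132]
print(scaler.transform(X)[:, 0])  # all zeros: (3.0 - 3.0) / 1.0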
+ + scale_min, scale_max = -30, 19 + scales = np.array([10**i for i in range(scale_min, scale_max + 1)], dtype=dtype) + + n_features = scales.shape[0] + X = np.empty((n_samples, n_features), dtype=dtype) + # Make a dataset of known var = scales**2 and mean = average + X[: n_samples // 2, :] = average + scales + X[n_samples // 2 :, :] = average - scales + X_array = X if sparse_container is None else sparse_container(X) + + scaler = StandardScaler(with_mean=False).fit(X_array) + + # StandardScaler uses float64 accumulators even if the data has a float32 + # dtype. + eps = np.finfo(np.float64).eps + + # if var < bound = N.eps.var + N².eps².mean², the feature is considered + # constant and the scale_ attribute is set to 1. + bounds = n_samples * eps * scales**2 + n_samples**2 * eps**2 * average**2 + within_bounds = scales**2 <= bounds + + # Check that scale_min is small enough to have some scales below the + # bound and therefore detected as constant: + assert np.any(within_bounds) + + # Check that such features are actually treated as constant by the scaler: + assert all(scaler.var_[within_bounds] <= bounds[within_bounds]) + assert_allclose(scaler.scale_[within_bounds], 1.0) + + # Depending the on the dtype of X, some features might not actually be + # representable as non constant for small scales (even if above the + # precision bound of the float64 variance estimate). Such feature should + # be correctly detected as constants with 0 variance by StandardScaler. + representable_diff = X[0, :] - X[-1, :] != 0 + assert_allclose(scaler.var_[np.logical_not(representable_diff)], 0) + assert_allclose(scaler.scale_[np.logical_not(representable_diff)], 1) + + # The other features are scaled and scale_ is equal to sqrt(var_) assuming + # that scales are large enough for average + scale and average - scale to + # be distinct in X (depending on X's dtype). + common_mask = np.logical_and(scales**2 > bounds, representable_diff) + assert_allclose(scaler.scale_[common_mask], np.sqrt(scaler.var_)[common_mask]) + + +def test_scale_1d(): + # 1-d inputs + X_list = [1.0, 3.0, 5.0, 0.0] + X_arr = np.array(X_list) + + for X in [X_list, X_arr]: + X_scaled = scale(X) + assert_array_almost_equal(X_scaled.mean(), 0.0) + assert_array_almost_equal(X_scaled.std(), 1.0) + assert_array_equal(scale(X, with_mean=False, with_std=False), X) + + +@skip_if_32bit +def test_standard_scaler_numerical_stability(): + # Test numerical stability of scaling + # np.log(1e-5) is taken because of its floating point representation + # was empirically found to cause numerical problems with np.mean & np.std. 
+ x = np.full(8, np.log(1e-5), dtype=np.float64) + # This does not raise a warning as the number of samples is too low + # to trigger the problem in recent numpy + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + scale(x) + assert_array_almost_equal(scale(x), np.zeros(8)) + + # with 2 more samples, the std computation run into numerical issues: + x = np.full(10, np.log(1e-5), dtype=np.float64) + warning_message = "standard deviation of the data is probably very close to 0" + with pytest.warns(UserWarning, match=warning_message): + x_scaled = scale(x) + assert_array_almost_equal(x_scaled, np.zeros(10)) + + x = np.full(10, 1e-100, dtype=np.float64) + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + x_small_scaled = scale(x) + assert_array_almost_equal(x_small_scaled, np.zeros(10)) + + # Large values can cause (often recoverable) numerical stability issues: + x_big = np.full(10, 1e100, dtype=np.float64) + warning_message = "Dataset may contain too large values" + with pytest.warns(UserWarning, match=warning_message): + x_big_scaled = scale(x_big) + assert_array_almost_equal(x_big_scaled, np.zeros(10)) + assert_array_almost_equal(x_big_scaled, x_small_scaled) + with pytest.warns(UserWarning, match=warning_message): + x_big_centered = scale(x_big, with_std=False) + assert_array_almost_equal(x_big_centered, np.zeros(10)) + assert_array_almost_equal(x_big_centered, x_small_scaled) + + +def test_scaler_2d_arrays(): + # Test scaling of 2d array along first axis + rng = np.random.RandomState(0) + n_features = 5 + n_samples = 4 + X = rng.randn(n_samples, n_features) + X[:, 0] = 0.0 # first feature is always of zero + + scaler = StandardScaler() + X_scaled = scaler.fit(X).transform(X, copy=True) + assert not np.any(np.isnan(X_scaled)) + assert scaler.n_samples_seen_ == n_samples + + assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) + assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0]) + # Check that X has been copied + assert X_scaled is not X + + # check inverse transform + X_scaled_back = scaler.inverse_transform(X_scaled) + assert X_scaled_back is not X + assert X_scaled_back is not X_scaled + assert_array_almost_equal(X_scaled_back, X) + + X_scaled = scale(X, axis=1, with_std=False) + assert not np.any(np.isnan(X_scaled)) + assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0]) + X_scaled = scale(X, axis=1, with_std=True) + assert not np.any(np.isnan(X_scaled)) + assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0]) + assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0]) + # Check that the data hasn't been modified + assert X_scaled is not X + + X_scaled = scaler.fit(X).transform(X, copy=False) + assert not np.any(np.isnan(X_scaled)) + assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) + assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0]) + # Check that X has not been copied + assert X_scaled is X + + X = rng.randn(4, 5) + X[:, 0] = 1.0 # first feature is a constant, non zero feature + scaler = StandardScaler() + X_scaled = scaler.fit(X).transform(X, copy=True) + assert not np.any(np.isnan(X_scaled)) + assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) + assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0]) + # Check that X has not been copied + assert X_scaled is not X + + +def test_scaler_float16_overflow(): + # Test if the scaler will not overflow on float16 numpy arrays + 
rng = np.random.RandomState(0) + # float16 has a maximum of 65500.0. On the worst case 5 * 200000 is 100000 + # which is enough to overflow the data type + X = rng.uniform(5, 10, [200000, 1]).astype(np.float16) + + with np.errstate(over="raise"): + scaler = StandardScaler().fit(X) + X_scaled = scaler.transform(X) + + # Calculate the float64 equivalent to verify result + X_scaled_f64 = StandardScaler().fit_transform(X.astype(np.float64)) + + # Overflow calculations may cause -inf, inf, or nan. Since there is no nan + # input, all of the outputs should be finite. This may be redundant since a + # FloatingPointError exception will be thrown on overflow above. + assert np.all(np.isfinite(X_scaled)) + + # The normal distribution is very unlikely to go above 4. At 4.0-8.0 the + # float16 precision is 2^-8 which is around 0.004. Thus only 2 decimals are + # checked to account for precision differences. + assert_array_almost_equal(X_scaled, X_scaled_f64, decimal=2) + + +def test_handle_zeros_in_scale(): + s1 = np.array([0, 1e-16, 1, 2, 3]) + s2 = _handle_zeros_in_scale(s1, copy=True) + + assert_allclose(s1, np.array([0, 1e-16, 1, 2, 3])) + assert_allclose(s2, np.array([1, 1, 1, 2, 3])) + + +def test_minmax_scaler_partial_fit(): + # Test if partial_fit run over many batches of size 1 and 50 + # gives the same results as fit + X = X_2d + n = X.shape[0] + + for chunk_size in [1, 2, 50, n, n + 42]: + # Test mean at the end of the process + scaler_batch = MinMaxScaler().fit(X) + + scaler_incr = MinMaxScaler() + for batch in gen_batches(n_samples, chunk_size): + scaler_incr = scaler_incr.partial_fit(X[batch]) + + assert_array_almost_equal(scaler_batch.data_min_, scaler_incr.data_min_) + assert_array_almost_equal(scaler_batch.data_max_, scaler_incr.data_max_) + assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_ + assert_array_almost_equal(scaler_batch.data_range_, scaler_incr.data_range_) + assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) + assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_) + + # Test std after 1 step + batch0 = slice(0, chunk_size) + scaler_batch = MinMaxScaler().fit(X[batch0]) + scaler_incr = MinMaxScaler().partial_fit(X[batch0]) + + assert_array_almost_equal(scaler_batch.data_min_, scaler_incr.data_min_) + assert_array_almost_equal(scaler_batch.data_max_, scaler_incr.data_max_) + assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_ + assert_array_almost_equal(scaler_batch.data_range_, scaler_incr.data_range_) + assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) + assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_) + + # Test std until the end of partial fits, and + scaler_batch = MinMaxScaler().fit(X) + scaler_incr = MinMaxScaler() # Clean estimator + for i, batch in enumerate(gen_batches(n_samples, chunk_size)): + scaler_incr = scaler_incr.partial_fit(X[batch]) + assert_correct_incr( + i, + batch_start=batch.start, + batch_stop=batch.stop, + n=n, + chunk_size=chunk_size, + n_samples_seen=scaler_incr.n_samples_seen_, + ) + + +def test_standard_scaler_partial_fit(): + # Test if partial_fit run over many batches of size 1 and 50 + # gives the same results as fit + X = X_2d + n = X.shape[0] + + for chunk_size in [1, 2, 50, n, n + 42]: + # Test mean at the end of the process + scaler_batch = StandardScaler(with_std=False).fit(X) + + scaler_incr = StandardScaler(with_std=False) + for batch in gen_batches(n_samples, chunk_size): + scaler_incr = scaler_incr.partial_fit(X[batch]) + 
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_) + assert scaler_batch.var_ == scaler_incr.var_ # Nones + assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_ + + # Test std after 1 step + batch0 = slice(0, chunk_size) + scaler_incr = StandardScaler().partial_fit(X[batch0]) + if chunk_size == 1: + assert_array_almost_equal( + np.zeros(n_features, dtype=np.float64), scaler_incr.var_ + ) + assert_array_almost_equal( + np.ones(n_features, dtype=np.float64), scaler_incr.scale_ + ) + else: + assert_array_almost_equal(np.var(X[batch0], axis=0), scaler_incr.var_) + assert_array_almost_equal( + np.std(X[batch0], axis=0), scaler_incr.scale_ + ) # no constants + + # Test std until the end of partial fits, and + scaler_batch = StandardScaler().fit(X) + scaler_incr = StandardScaler() # Clean estimator + for i, batch in enumerate(gen_batches(n_samples, chunk_size)): + scaler_incr = scaler_incr.partial_fit(X[batch]) + assert_correct_incr( + i, + batch_start=batch.start, + batch_stop=batch.stop, + n=n, + chunk_size=chunk_size, + n_samples_seen=scaler_incr.n_samples_seen_, + ) + + assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_) + assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_ + + +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +def test_standard_scaler_partial_fit_numerical_stability(sparse_container): + # Test if the incremental computation introduces significative errors + # for large datasets with values of large magniture + rng = np.random.RandomState(0) + n_features = 2 + n_samples = 100 + offsets = rng.uniform(-1e15, 1e15, size=n_features) + scales = rng.uniform(1e3, 1e6, size=n_features) + X = rng.randn(n_samples, n_features) * scales + offsets + + scaler_batch = StandardScaler().fit(X) + scaler_incr = StandardScaler() + for chunk in X: + scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features)) + + # Regardless of abs values, they must not be more diff 6 significant digits + tol = 10 ** (-6) + assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol) + assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol) + assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol) + # NOTE Be aware that for much larger offsets std is very unstable (last + # assert) while mean is OK. 
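+    # For intuition, partial_fit maintains running statistics and merges a
+    # previous batch (n_a, mu_a, M2_a) with a new one (n_b, mu_b, M2_b) using
+    # a pairwise update of the form
+    #   mu = (n_a * mu_a + n_b * mu_b) / (n_a + n_b)
+    #   M2 = M2_a + M2_b + (mu_b - mu_a) ** 2 * n_a * n_b / (n_a + n_b)
+    # (Chan et al. style), which keeps the relative error small even with the
+    # large offsets used above.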
+ + # Sparse input + size = (100, 3) + scale = 1e20 + X = sparse_container(rng.randint(0, 2, size).astype(np.float64) * scale) + + # with_mean=False is required with sparse input + scaler = StandardScaler(with_mean=False).fit(X) + scaler_incr = StandardScaler(with_mean=False) + + for chunk in X: + scaler_incr = scaler_incr.partial_fit(chunk) + + # Regardless of magnitude, they must not differ more than of 6 digits + tol = 10 ** (-6) + assert scaler.mean_ is not None + assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol) + assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol) + + +@pytest.mark.parametrize("sample_weight", [True, None]) +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +def test_partial_fit_sparse_input(sample_weight, sparse_container): + # Check that sparsity is not destroyed + X = sparse_container(np.array([[1.0], [0.0], [0.0], [5.0]])) + + if sample_weight: + sample_weight = rng.rand(X.shape[0]) + + null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) + X_null = null_transform.partial_fit(X, sample_weight=sample_weight).transform(X) + assert_array_equal(X_null.toarray(), X.toarray()) + X_orig = null_transform.inverse_transform(X_null) + assert_array_equal(X_orig.toarray(), X_null.toarray()) + assert_array_equal(X_orig.toarray(), X.toarray()) + + +@pytest.mark.parametrize("sample_weight", [True, None]) +def test_standard_scaler_trasform_with_partial_fit(sample_weight): + # Check some postconditions after applying partial_fit and transform + X = X_2d[:100, :] + + if sample_weight: + sample_weight = rng.rand(X.shape[0]) + + scaler_incr = StandardScaler() + for i, batch in enumerate(gen_batches(X.shape[0], 1)): + X_sofar = X[: (i + 1), :] + chunks_copy = X_sofar.copy() + if sample_weight is None: + scaled_batch = StandardScaler().fit_transform(X_sofar) + scaler_incr = scaler_incr.partial_fit(X[batch]) + else: + scaled_batch = StandardScaler().fit_transform( + X_sofar, sample_weight=sample_weight[: i + 1] + ) + scaler_incr = scaler_incr.partial_fit( + X[batch], sample_weight=sample_weight[batch] + ) + scaled_incr = scaler_incr.transform(X_sofar) + + assert_array_almost_equal(scaled_batch, scaled_incr) + assert_array_almost_equal(X_sofar, chunks_copy) # No change + right_input = scaler_incr.inverse_transform(scaled_incr) + assert_array_almost_equal(X_sofar, right_input) + + zero = np.zeros(X.shape[1]) + epsilon = np.finfo(float).eps + assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal + assert_array_less(zero, scaler_incr.scale_ + epsilon) + if sample_weight is None: + # (i+1) because the Scaler has been already fitted + assert (i + 1) == scaler_incr.n_samples_seen_ + else: + assert np.sum(sample_weight[: i + 1]) == pytest.approx( + scaler_incr.n_samples_seen_ + ) + + +def test_standard_check_array_of_inverse_transform(): + # Check if StandardScaler inverse_transform is + # converting the integer array to float + x = np.array( + [ + [1, 1, 1, 0, 1, 0], + [1, 1, 1, 0, 1, 0], + [0, 8, 0, 1, 0, 0], + [1, 4, 1, 1, 0, 0], + [0, 1, 0, 0, 1, 0], + [0, 4, 0, 1, 0, 1], + ], + dtype=np.int32, + ) + + scaler = StandardScaler() + scaler.fit(x) + + # The of inverse_transform should be converted + # to a float array. + # If not X *= self.scale_ will fail. 
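+    # In other words, inverse_transform is expected to copy/cast the int32
+    # input to a floating dtype before applying `X *= self.scale_`; an
+    # in-place multiply of an integer array by a float scale would raise a
+    # casting error.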
+ scaler.inverse_transform(x) + + +@pytest.mark.parametrize( + "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations() +) +@pytest.mark.parametrize( + "check", + [check_array_api_input_and_values], + ids=_get_check_estimator_ids, +) +@pytest.mark.parametrize( + "estimator", + [ + MaxAbsScaler(), + MinMaxScaler(), + KernelCenterer(), + Normalizer(norm="l1"), + Normalizer(norm="l2"), + Normalizer(norm="max"), + ], + ids=_get_check_estimator_ids, +) +def test_scaler_array_api_compliance( + estimator, check, array_namespace, device, dtype_name +): + name = estimator.__class__.__name__ + check(name, estimator, array_namespace, device=device, dtype_name=dtype_name) + + +def test_min_max_scaler_iris(): + X = iris.data + scaler = MinMaxScaler() + # default params + X_trans = scaler.fit_transform(X) + assert_array_almost_equal(X_trans.min(axis=0), 0) + assert_array_almost_equal(X_trans.max(axis=0), 1) + X_trans_inv = scaler.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + + # not default params: min=1, max=2 + scaler = MinMaxScaler(feature_range=(1, 2)) + X_trans = scaler.fit_transform(X) + assert_array_almost_equal(X_trans.min(axis=0), 1) + assert_array_almost_equal(X_trans.max(axis=0), 2) + X_trans_inv = scaler.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + + # min=-.5, max=.6 + scaler = MinMaxScaler(feature_range=(-0.5, 0.6)) + X_trans = scaler.fit_transform(X) + assert_array_almost_equal(X_trans.min(axis=0), -0.5) + assert_array_almost_equal(X_trans.max(axis=0), 0.6) + X_trans_inv = scaler.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + + # raises on invalid range + scaler = MinMaxScaler(feature_range=(2, 1)) + with pytest.raises(ValueError): + scaler.fit(X) + + +def test_min_max_scaler_zero_variance_features(): + # Check min max scaler on toy data with zero variance features + X = [[0.0, 1.0, +0.5], [0.0, 1.0, -0.1], [0.0, 1.0, +1.1]] + + X_new = [[+0.0, 2.0, 0.5], [-1.0, 1.0, 0.0], [+0.0, 1.0, 1.5]] + + # default params + scaler = MinMaxScaler() + X_trans = scaler.fit_transform(X) + X_expected_0_1 = [[0.0, 0.0, 0.5], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]] + assert_array_almost_equal(X_trans, X_expected_0_1) + X_trans_inv = scaler.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + + X_trans_new = scaler.transform(X_new) + X_expected_0_1_new = [[+0.0, 1.0, 0.500], [-1.0, 0.0, 0.083], [+0.0, 0.0, 1.333]] + assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2) + + # not default params + scaler = MinMaxScaler(feature_range=(1, 2)) + X_trans = scaler.fit_transform(X) + X_expected_1_2 = [[1.0, 1.0, 1.5], [1.0, 1.0, 1.0], [1.0, 1.0, 2.0]] + assert_array_almost_equal(X_trans, X_expected_1_2) + + # function interface + X_trans = minmax_scale(X) + assert_array_almost_equal(X_trans, X_expected_0_1) + X_trans = minmax_scale(X, feature_range=(1, 2)) + assert_array_almost_equal(X_trans, X_expected_1_2) + + +def test_minmax_scale_axis1(): + X = iris.data + X_trans = minmax_scale(X, axis=1) + assert_array_almost_equal(np.min(X_trans, axis=1), 0) + assert_array_almost_equal(np.max(X_trans, axis=1), 1) + + +def test_min_max_scaler_1d(): + # Test scaling of dataset along single axis + for X in [X_1row, X_1col, X_list_1row, X_list_1row]: + scaler = MinMaxScaler(copy=True) + X_scaled = scaler.fit(X).transform(X) + + if isinstance(X, list): + X = np.array(X) # cast only after scaling done + + if _check_dim_1axis(X) == 1: + assert_array_almost_equal(X_scaled.min(axis=0), 
np.zeros(n_features)) + assert_array_almost_equal(X_scaled.max(axis=0), np.zeros(n_features)) + else: + assert_array_almost_equal(X_scaled.min(axis=0), 0.0) + assert_array_almost_equal(X_scaled.max(axis=0), 1.0) + assert scaler.n_samples_seen_ == X.shape[0] + + # check inverse transform + X_scaled_back = scaler.inverse_transform(X_scaled) + assert_array_almost_equal(X_scaled_back, X) + + # Constant feature + X = np.ones((5, 1)) + scaler = MinMaxScaler() + X_scaled = scaler.fit(X).transform(X) + assert X_scaled.min() >= 0.0 + assert X_scaled.max() <= 1.0 + assert scaler.n_samples_seen_ == X.shape[0] + + # Function interface + X_1d = X_1row.ravel() + min_ = X_1d.min() + max_ = X_1d.max() + assert_array_almost_equal( + (X_1d - min_) / (max_ - min_), minmax_scale(X_1d, copy=True) + ) + + +@pytest.mark.parametrize("sample_weight", [True, None]) +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +def test_scaler_without_centering(sample_weight, sparse_container): + rng = np.random.RandomState(42) + X = rng.randn(4, 5) + X[:, 0] = 0.0 # first feature is always of zero + X_sparse = sparse_container(X) + + if sample_weight: + sample_weight = rng.rand(X.shape[0]) + + with pytest.raises(ValueError): + StandardScaler().fit(X_sparse) + + scaler = StandardScaler(with_mean=False).fit(X, sample_weight=sample_weight) + X_scaled = scaler.transform(X, copy=True) + assert not np.any(np.isnan(X_scaled)) + + scaler_sparse = StandardScaler(with_mean=False).fit( + X_sparse, sample_weight=sample_weight + ) + X_sparse_scaled = scaler_sparse.transform(X_sparse, copy=True) + assert not np.any(np.isnan(X_sparse_scaled.data)) + + assert_array_almost_equal(scaler.mean_, scaler_sparse.mean_) + assert_array_almost_equal(scaler.var_, scaler_sparse.var_) + assert_array_almost_equal(scaler.scale_, scaler_sparse.scale_) + assert_array_almost_equal(scaler.n_samples_seen_, scaler_sparse.n_samples_seen_) + + if sample_weight is None: + assert_array_almost_equal( + X_scaled.mean(axis=0), [0.0, -0.01, 2.24, -0.35, -0.78], 2 + ) + assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0]) + + X_sparse_scaled_mean, X_sparse_scaled_var = mean_variance_axis(X_sparse_scaled, 0) + assert_array_almost_equal(X_sparse_scaled_mean, X_scaled.mean(axis=0)) + assert_array_almost_equal(X_sparse_scaled_var, X_scaled.var(axis=0)) + + # Check that X has not been modified (copy) + assert X_scaled is not X + assert X_sparse_scaled is not X_sparse + + X_scaled_back = scaler.inverse_transform(X_scaled) + assert X_scaled_back is not X + assert X_scaled_back is not X_scaled + assert_array_almost_equal(X_scaled_back, X) + + X_sparse_scaled_back = scaler_sparse.inverse_transform(X_sparse_scaled) + assert X_sparse_scaled_back is not X_sparse + assert X_sparse_scaled_back is not X_sparse_scaled + assert_array_almost_equal(X_sparse_scaled_back.toarray(), X) + + if sparse_container in CSR_CONTAINERS: + null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) + X_null = null_transform.fit_transform(X_sparse) + assert_array_equal(X_null.data, X_sparse.data) + X_orig = null_transform.inverse_transform(X_null) + assert_array_equal(X_orig.data, X_sparse.data) + + +@pytest.mark.parametrize("with_mean", [True, False]) +@pytest.mark.parametrize("with_std", [True, False]) +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS) +def test_scaler_n_samples_seen_with_nan(with_mean, with_std, sparse_container): + X = np.array( + [[0, 1, 3], [np.nan, 6, 10], [5, 4, np.nan], [8, 0, 
np.nan]], dtype=np.float64 + ) + if sparse_container is not None: + X = sparse_container(X) + + if sparse.issparse(X) and with_mean: + pytest.skip("'with_mean=True' cannot be used with sparse matrix.") + + transformer = StandardScaler(with_mean=with_mean, with_std=with_std) + transformer.fit(X) + + assert_array_equal(transformer.n_samples_seen_, np.array([3, 4, 2])) + + +def _check_identity_scalers_attributes(scaler_1, scaler_2): + assert scaler_1.mean_ is scaler_2.mean_ is None + assert scaler_1.var_ is scaler_2.var_ is None + assert scaler_1.scale_ is scaler_2.scale_ is None + assert scaler_1.n_samples_seen_ == scaler_2.n_samples_seen_ + + +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +def test_scaler_return_identity(sparse_container): + # test that the scaler return identity when with_mean and with_std are + # False + X_dense = np.array([[0, 1, 3], [5, 6, 0], [8, 0, 10]], dtype=np.float64) + X_sparse = sparse_container(X_dense) + + transformer_dense = StandardScaler(with_mean=False, with_std=False) + X_trans_dense = transformer_dense.fit_transform(X_dense) + assert_allclose(X_trans_dense, X_dense) + + transformer_sparse = clone(transformer_dense) + X_trans_sparse = transformer_sparse.fit_transform(X_sparse) + assert_allclose_dense_sparse(X_trans_sparse, X_sparse) + + _check_identity_scalers_attributes(transformer_dense, transformer_sparse) + + transformer_dense.partial_fit(X_dense) + transformer_sparse.partial_fit(X_sparse) + _check_identity_scalers_attributes(transformer_dense, transformer_sparse) + + transformer_dense.fit(X_dense) + transformer_sparse.fit(X_sparse) + _check_identity_scalers_attributes(transformer_dense, transformer_sparse) + + +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +def test_scaler_int(sparse_container): + # test that scaler converts integer input to floating + # for both sparse and dense matrices + rng = np.random.RandomState(42) + X = rng.randint(20, size=(4, 5)) + X[:, 0] = 0 # first feature is always of zero + X_sparse = sparse_container(X) + + with warnings.catch_warnings(record=True): + scaler = StandardScaler(with_mean=False).fit(X) + X_scaled = scaler.transform(X, copy=True) + assert not np.any(np.isnan(X_scaled)) + + with warnings.catch_warnings(record=True): + scaler_sparse = StandardScaler(with_mean=False).fit(X_sparse) + X_sparse_scaled = scaler_sparse.transform(X_sparse, copy=True) + assert not np.any(np.isnan(X_sparse_scaled.data)) + + assert_array_almost_equal(scaler.mean_, scaler_sparse.mean_) + assert_array_almost_equal(scaler.var_, scaler_sparse.var_) + assert_array_almost_equal(scaler.scale_, scaler_sparse.scale_) + + assert_array_almost_equal( + X_scaled.mean(axis=0), [0.0, 1.109, 1.856, 21.0, 1.559], 2 + ) + assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0]) + + X_sparse_scaled_mean, X_sparse_scaled_std = mean_variance_axis( + X_sparse_scaled.astype(float), 0 + ) + assert_array_almost_equal(X_sparse_scaled_mean, X_scaled.mean(axis=0)) + assert_array_almost_equal(X_sparse_scaled_std, X_scaled.std(axis=0)) + + # Check that X has not been modified (copy) + assert X_scaled is not X + assert X_sparse_scaled is not X_sparse + + X_scaled_back = scaler.inverse_transform(X_scaled) + assert X_scaled_back is not X + assert X_scaled_back is not X_scaled + assert_array_almost_equal(X_scaled_back, X) + + X_sparse_scaled_back = scaler_sparse.inverse_transform(X_sparse_scaled) + assert X_sparse_scaled_back is not X_sparse + assert X_sparse_scaled_back is not 
X_sparse_scaled + assert_array_almost_equal(X_sparse_scaled_back.toarray(), X) + + if sparse_container in CSR_CONTAINERS: + null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) + with warnings.catch_warnings(record=True): + X_null = null_transform.fit_transform(X_sparse) + assert_array_equal(X_null.data, X_sparse.data) + X_orig = null_transform.inverse_transform(X_null) + assert_array_equal(X_orig.data, X_sparse.data) + + +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +def test_scaler_without_copy(sparse_container): + # Check that StandardScaler.fit does not change input + rng = np.random.RandomState(42) + X = rng.randn(4, 5) + X[:, 0] = 0.0 # first feature is always of zero + X_sparse = sparse_container(X) + + X_copy = X.copy() + StandardScaler(copy=False).fit(X) + assert_array_equal(X, X_copy) + + X_sparse_copy = X_sparse.copy() + StandardScaler(with_mean=False, copy=False).fit(X_sparse) + assert_array_equal(X_sparse.toarray(), X_sparse_copy.toarray()) + + +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +def test_scale_sparse_with_mean_raise_exception(sparse_container): + rng = np.random.RandomState(42) + X = rng.randn(4, 5) + X_sparse = sparse_container(X) + + # check scaling and fit with direct calls on sparse data + with pytest.raises(ValueError): + scale(X_sparse, with_mean=True) + with pytest.raises(ValueError): + StandardScaler(with_mean=True).fit(X_sparse) + + # check transform and inverse_transform after a fit on a dense array + scaler = StandardScaler(with_mean=True).fit(X) + with pytest.raises(ValueError): + scaler.transform(X_sparse) + + X_transformed_sparse = sparse_container(scaler.transform(X)) + with pytest.raises(ValueError): + scaler.inverse_transform(X_transformed_sparse) + + +def test_scale_input_finiteness_validation(): + # Check if non finite inputs raise ValueError + X = [[np.inf, 5, 6, 7, 8]] + with pytest.raises( + ValueError, match="Input contains infinity or a value too large" + ): + scale(X) + + +def test_robust_scaler_error_sparse(): + X_sparse = sparse.rand(1000, 10) + scaler = RobustScaler(with_centering=True) + err_msg = "Cannot center sparse matrices" + with pytest.raises(ValueError, match=err_msg): + scaler.fit(X_sparse) + + +@pytest.mark.parametrize("with_centering", [True, False]) +@pytest.mark.parametrize("with_scaling", [True, False]) +@pytest.mark.parametrize("X", [np.random.randn(10, 3), sparse.rand(10, 3, density=0.5)]) +def test_robust_scaler_attributes(X, with_centering, with_scaling): + # check consistent type of attributes + if with_centering and sparse.issparse(X): + pytest.skip("RobustScaler cannot center sparse matrix") + + scaler = RobustScaler(with_centering=with_centering, with_scaling=with_scaling) + scaler.fit(X) + + if with_centering: + assert isinstance(scaler.center_, np.ndarray) + else: + assert scaler.center_ is None + if with_scaling: + assert isinstance(scaler.scale_, np.ndarray) + else: + assert scaler.scale_ is None + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_robust_scaler_col_zero_sparse(csr_container): + # check that the scaler is working when there is not data materialized in a + # column of a sparse matrix + X = np.random.randn(10, 5) + X[:, 0] = 0 + X = csr_container(X) + + scaler = RobustScaler(with_centering=False) + scaler.fit(X) + assert scaler.scale_[0] == pytest.approx(1) + + X_trans = scaler.transform(X) + assert_allclose(X[:, [0]].toarray(), X_trans[:, [0]].toarray()) + + +def 
test_robust_scaler_2d_arrays(): + # Test robust scaling of 2d array along first axis + rng = np.random.RandomState(0) + X = rng.randn(4, 5) + X[:, 0] = 0.0 # first feature is always of zero + + scaler = RobustScaler() + X_scaled = scaler.fit(X).transform(X) + + assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0]) + assert_array_almost_equal(X_scaled.std(axis=0)[0], 0) + + +@pytest.mark.parametrize("density", [0, 0.05, 0.1, 0.5, 1]) +@pytest.mark.parametrize("strictly_signed", ["positive", "negative", "zeros", None]) +def test_robust_scaler_equivalence_dense_sparse(density, strictly_signed): + # Check the equivalence of the fitting with dense and sparse matrices + X_sparse = sparse.rand(1000, 5, density=density).tocsc() + if strictly_signed == "positive": + X_sparse.data = np.abs(X_sparse.data) + elif strictly_signed == "negative": + X_sparse.data = -np.abs(X_sparse.data) + elif strictly_signed == "zeros": + X_sparse.data = np.zeros(X_sparse.data.shape, dtype=np.float64) + X_dense = X_sparse.toarray() + + scaler_sparse = RobustScaler(with_centering=False) + scaler_dense = RobustScaler(with_centering=False) + + scaler_sparse.fit(X_sparse) + scaler_dense.fit(X_dense) + + assert_allclose(scaler_sparse.scale_, scaler_dense.scale_) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_robust_scaler_transform_one_row_csr(csr_container): + # Check RobustScaler on transforming csr matrix with one row + rng = np.random.RandomState(0) + X = rng.randn(4, 5) + single_row = np.array([[0.1, 1.0, 2.0, 0.0, -1.0]]) + scaler = RobustScaler(with_centering=False) + scaler = scaler.fit(X) + row_trans = scaler.transform(csr_container(single_row)) + row_expected = single_row / scaler.scale_ + assert_array_almost_equal(row_trans.toarray(), row_expected) + row_scaled_back = scaler.inverse_transform(row_trans) + assert_array_almost_equal(single_row, row_scaled_back.toarray()) + + +def test_robust_scaler_iris(): + X = iris.data + scaler = RobustScaler() + X_trans = scaler.fit_transform(X) + assert_array_almost_equal(np.median(X_trans, axis=0), 0) + X_trans_inv = scaler.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + q = np.percentile(X_trans, q=(25, 75), axis=0) + iqr = q[1] - q[0] + assert_array_almost_equal(iqr, 1) + + +def test_robust_scaler_iris_quantiles(): + X = iris.data + scaler = RobustScaler(quantile_range=(10, 90)) + X_trans = scaler.fit_transform(X) + assert_array_almost_equal(np.median(X_trans, axis=0), 0) + X_trans_inv = scaler.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + q = np.percentile(X_trans, q=(10, 90), axis=0) + q_range = q[1] - q[0] + assert_array_almost_equal(q_range, 1) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_quantile_transform_iris(csc_container): + X = iris.data + # uniform output distribution + transformer = QuantileTransformer(n_quantiles=30) + X_trans = transformer.fit_transform(X) + X_trans_inv = transformer.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + # normal output distribution + transformer = QuantileTransformer(n_quantiles=30, output_distribution="normal") + X_trans = transformer.fit_transform(X) + X_trans_inv = transformer.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + # make sure it is possible to take the inverse of a sparse matrix + # which contain negative value; this is the case in the iris dataset + X_sparse = csc_container(X) + X_sparse_tran = transformer.fit_transform(X_sparse) + X_sparse_tran_inv = 
transformer.inverse_transform(X_sparse_tran) + assert_array_almost_equal(X_sparse.toarray(), X_sparse_tran_inv.toarray()) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_quantile_transform_check_error(csc_container): + X = np.transpose( + [ + [0, 25, 50, 0, 0, 0, 75, 0, 0, 100], + [2, 4, 0, 0, 6, 8, 0, 10, 0, 0], + [0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1], + ] + ) + X = csc_container(X) + X_neg = np.transpose( + [ + [0, 25, 50, 0, 0, 0, 75, 0, 0, 100], + [-2, 4, 0, 0, 6, 8, 0, 10, 0, 0], + [0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1], + ] + ) + X_neg = csc_container(X_neg) + + err_msg = ( + "The number of quantiles cannot be greater than " + "the number of samples used. Got 1000 quantiles " + "and 10 samples." + ) + with pytest.raises(ValueError, match=err_msg): + QuantileTransformer(subsample=10).fit(X) + + transformer = QuantileTransformer(n_quantiles=10) + err_msg = "QuantileTransformer only accepts non-negative sparse matrices." + with pytest.raises(ValueError, match=err_msg): + transformer.fit(X_neg) + transformer.fit(X) + err_msg = "QuantileTransformer only accepts non-negative sparse matrices." + with pytest.raises(ValueError, match=err_msg): + transformer.transform(X_neg) + + X_bad_feat = np.transpose( + [[0, 25, 50, 0, 0, 0, 75, 0, 0, 100], [0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]] + ) + err_msg = ( + "X has 2 features, but QuantileTransformer is expecting 3 features as input." + ) + with pytest.raises(ValueError, match=err_msg): + transformer.inverse_transform(X_bad_feat) + + transformer = QuantileTransformer(n_quantiles=10).fit(X) + # check that an error is raised if input is scalar + with pytest.raises(ValueError, match="Expected 2D array, got scalar array instead"): + transformer.transform(10) + # check that a warning is raised is n_quantiles > n_samples + transformer = QuantileTransformer(n_quantiles=100) + warn_msg = "n_quantiles is set to n_samples" + with pytest.warns(UserWarning, match=warn_msg) as record: + transformer.fit(X) + assert len(record) == 1 + assert transformer.n_quantiles_ == X.shape[0] + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_quantile_transform_sparse_ignore_zeros(csc_container): + X = np.array([[0, 1], [0, 0], [0, 2], [0, 2], [0, 1]]) + X_sparse = csc_container(X) + transformer = QuantileTransformer(ignore_implicit_zeros=True, n_quantiles=5) + + # dense case -> warning raise + warning_message = ( + "'ignore_implicit_zeros' takes effect" + " only with sparse matrix. This parameter has no" + " effect." 
+ ) + with pytest.warns(UserWarning, match=warning_message): + transformer.fit(X) + + X_expected = np.array([[0, 0], [0, 0], [0, 1], [0, 1], [0, 0]]) + X_trans = transformer.fit_transform(X_sparse) + assert_almost_equal(X_expected, X_trans.toarray()) + + # consider the case where sparse entries are missing values and user-given + # zeros are to be considered + X_data = np.array([0, 0, 1, 0, 2, 2, 1, 0, 1, 2, 0]) + X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]) + X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8]) + X_sparse = csc_container((X_data, (X_row, X_col))) + X_trans = transformer.fit_transform(X_sparse) + X_expected = np.array( + [ + [0.0, 0.5], + [0.0, 0.0], + [0.0, 1.0], + [0.0, 1.0], + [0.0, 0.5], + [0.0, 0.0], + [0.0, 0.5], + [0.0, 1.0], + [0.0, 0.0], + ] + ) + assert_almost_equal(X_expected, X_trans.toarray()) + + transformer = QuantileTransformer(ignore_implicit_zeros=True, n_quantiles=5) + X_data = np.array([-1, -1, 1, 0, 0, 0, 1, -1, 1]) + X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1]) + X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6]) + X_sparse = csc_container((X_data, (X_row, X_col))) + X_trans = transformer.fit_transform(X_sparse) + X_expected = np.array( + [[0, 1], [0, 0.375], [0, 0.375], [0, 0.375], [0, 1], [0, 0], [0, 1]] + ) + assert_almost_equal(X_expected, X_trans.toarray()) + assert_almost_equal( + X_sparse.toarray(), transformer.inverse_transform(X_trans).toarray() + ) + + # check in conjunction with subsampling + transformer = QuantileTransformer( + ignore_implicit_zeros=True, n_quantiles=5, subsample=8, random_state=0 + ) + X_trans = transformer.fit_transform(X_sparse) + assert_almost_equal(X_expected, X_trans.toarray()) + assert_almost_equal( + X_sparse.toarray(), transformer.inverse_transform(X_trans).toarray() + ) + + +def test_quantile_transform_dense_toy(): + X = np.array( + [[0, 2, 2.6], [25, 4, 4.1], [50, 6, 2.3], [75, 8, 9.5], [100, 10, 0.1]] + ) + + transformer = QuantileTransformer(n_quantiles=5) + transformer.fit(X) + + # using a uniform output, each entry of X should be map between 0 and 1 + # and equally spaced + X_trans = transformer.fit_transform(X) + X_expected = np.tile(np.linspace(0, 1, num=5), (3, 1)).T + assert_almost_equal(np.sort(X_trans, axis=0), X_expected) + + X_test = np.array( + [ + [-1, 1, 0], + [101, 11, 10], + ] + ) + X_expected = np.array( + [ + [0, 0, 0], + [1, 1, 1], + ] + ) + assert_array_almost_equal(transformer.transform(X_test), X_expected) + + X_trans_inv = transformer.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + + +def test_quantile_transform_subsampling(): + # Test that subsampling the input yield to a consistent results We check + # that the computed quantiles are almost mapped to a [0, 1] vector where + # values are equally spaced. The infinite norm is checked to be smaller + # than a given threshold. This is repeated 5 times. 
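+    # For a sorted uniform sample the learned quantiles_ should roughly follow
+    # the identity CDF, i.e. be close to np.linspace(0, 1, n_quantiles); the
+    # infinity norm of the difference measures how far each random subsample
+    # deviates from that ideal.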
+ + # dense support + n_samples = 1000000 + n_quantiles = 1000 + X = np.sort(np.random.sample((n_samples, 1)), axis=0) + ROUND = 5 + inf_norm_arr = [] + for random_state in range(ROUND): + transformer = QuantileTransformer( + random_state=random_state, + n_quantiles=n_quantiles, + subsample=n_samples // 10, + ) + transformer.fit(X) + diff = np.linspace(0, 1, n_quantiles) - np.ravel(transformer.quantiles_) + inf_norm = np.max(np.abs(diff)) + assert inf_norm < 1e-2 + inf_norm_arr.append(inf_norm) + # each random subsampling yield a unique approximation to the expected + # linspace CDF + assert len(np.unique(inf_norm_arr)) == len(inf_norm_arr) + + # sparse support + + X = sparse.rand(n_samples, 1, density=0.99, format="csc", random_state=0) + inf_norm_arr = [] + for random_state in range(ROUND): + transformer = QuantileTransformer( + random_state=random_state, + n_quantiles=n_quantiles, + subsample=n_samples // 10, + ) + transformer.fit(X) + diff = np.linspace(0, 1, n_quantiles) - np.ravel(transformer.quantiles_) + inf_norm = np.max(np.abs(diff)) + assert inf_norm < 1e-1 + inf_norm_arr.append(inf_norm) + # each random subsampling yield a unique approximation to the expected + # linspace CDF + assert len(np.unique(inf_norm_arr)) == len(inf_norm_arr) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_quantile_transform_sparse_toy(csc_container): + X = np.array( + [ + [0.0, 2.0, 0.0], + [25.0, 4.0, 0.0], + [50.0, 0.0, 2.6], + [0.0, 0.0, 4.1], + [0.0, 6.0, 0.0], + [0.0, 8.0, 0.0], + [75.0, 0.0, 2.3], + [0.0, 10.0, 0.0], + [0.0, 0.0, 9.5], + [100.0, 0.0, 0.1], + ] + ) + + X = csc_container(X) + + transformer = QuantileTransformer(n_quantiles=10) + transformer.fit(X) + + X_trans = transformer.fit_transform(X) + assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.0) + assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.0) + + X_trans_inv = transformer.inverse_transform(X_trans) + assert_array_almost_equal(X.toarray(), X_trans_inv.toarray()) + + transformer_dense = QuantileTransformer(n_quantiles=10).fit(X.toarray()) + + X_trans = transformer_dense.transform(X) + assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.0) + assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.0) + + X_trans_inv = transformer_dense.inverse_transform(X_trans) + assert_array_almost_equal(X.toarray(), X_trans_inv.toarray()) + + +def test_quantile_transform_axis1(): + X = np.array([[0, 25, 50, 75, 100], [2, 4, 6, 8, 10], [2.6, 4.1, 2.3, 9.5, 0.1]]) + + X_trans_a0 = quantile_transform(X.T, axis=0, n_quantiles=5) + X_trans_a1 = quantile_transform(X, axis=1, n_quantiles=5) + assert_array_almost_equal(X_trans_a0, X_trans_a1.T) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_quantile_transform_bounds(csc_container): + # Lower and upper bounds are manually mapped. We checked that in the case + # of a constant feature and binary feature, the bounds are properly mapped. 
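+    # In particular, inputs outside the range seen during fit are expected to
+    # be clipped to the extreme learned quantiles, so constant and binary
+    # features end up exactly on the bounds of the output range.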
+ X_dense = np.array([[0, 0], [0, 0], [1, 0]]) + X_sparse = csc_container(X_dense) + + # check sparse and dense are consistent + X_trans = QuantileTransformer(n_quantiles=3, random_state=0).fit_transform(X_dense) + assert_array_almost_equal(X_trans, X_dense) + X_trans_sp = QuantileTransformer(n_quantiles=3, random_state=0).fit_transform( + X_sparse + ) + assert_array_almost_equal(X_trans_sp.toarray(), X_dense) + assert_array_almost_equal(X_trans, X_trans_sp.toarray()) + + # check the consistency of the bounds by learning on 1 matrix + # and transforming another + X = np.array([[0, 1], [0, 0.5], [1, 0]]) + X1 = np.array([[0, 0.1], [0, 0.5], [1, 0.1]]) + transformer = QuantileTransformer(n_quantiles=3).fit(X) + X_trans = transformer.transform(X1) + assert_array_almost_equal(X_trans, X1) + + # check that values outside of the range learned will be mapped properly. + X = np.random.random((1000, 1)) + transformer = QuantileTransformer() + transformer.fit(X) + assert transformer.transform([[-10]]) == transformer.transform([[np.min(X)]]) + assert transformer.transform([[10]]) == transformer.transform([[np.max(X)]]) + assert transformer.inverse_transform([[-10]]) == transformer.inverse_transform( + [[np.min(transformer.references_)]] + ) + assert transformer.inverse_transform([[10]]) == transformer.inverse_transform( + [[np.max(transformer.references_)]] + ) + + +def test_quantile_transform_and_inverse(): + X_1 = iris.data + X_2 = np.array([[0.0], [BOUNDS_THRESHOLD / 10], [1.5], [2], [3], [3], [4]]) + for X in [X_1, X_2]: + transformer = QuantileTransformer(n_quantiles=1000, random_state=0) + X_trans = transformer.fit_transform(X) + X_trans_inv = transformer.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv, decimal=9) + + +def test_quantile_transform_nan(): + X = np.array([[np.nan, 0, 0, 1], [np.nan, np.nan, 0, 0.5], [np.nan, 1, 1, 0]]) + + transformer = QuantileTransformer(n_quantiles=10, random_state=42) + transformer.fit_transform(X) + + # check that the quantile of the first column is all NaN + assert np.isnan(transformer.quantiles_[:, 0]).all() + # all other column should not contain NaN + assert not np.isnan(transformer.quantiles_[:, 1:]).any() + + +@pytest.mark.parametrize("array_type", ["array", "sparse"]) +def test_quantile_transformer_sorted_quantiles(array_type): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/15733 + # Taken from upstream bug report: + # https://github.com/numpy/numpy/issues/14685 + X = np.array([0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 1, 1, 9, 9, 9, 8, 8, 7] * 10) + X = 0.1 * X.reshape(-1, 1) + X = _convert_container(X, array_type) + + n_quantiles = 100 + qt = QuantileTransformer(n_quantiles=n_quantiles).fit(X) + + # Check that the estimated quantile thresholds are monotically + # increasing: + quantiles = qt.quantiles_[:, 0] + assert len(quantiles) == 100 + assert all(np.diff(quantiles) >= 0) + + +def test_robust_scaler_invalid_range(): + for range_ in [ + (-1, 90), + (-2, -3), + (10, 101), + (100.5, 101), + (90, 50), + ]: + scaler = RobustScaler(quantile_range=range_) + + with pytest.raises(ValueError, match=r"Invalid quantile range: \("): + scaler.fit(iris.data) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_scale_function_without_centering(csr_container): + rng = np.random.RandomState(42) + X = rng.randn(4, 5) + X[:, 0] = 0.0 # first feature is always of zero + X_csr = csr_container(X) + + X_scaled = scale(X, with_mean=False) + assert not np.any(np.isnan(X_scaled)) + + X_csr_scaled = 
scale(X_csr, with_mean=False) + assert not np.any(np.isnan(X_csr_scaled.data)) + + # test csc has same outcome + X_csc_scaled = scale(X_csr.tocsc(), with_mean=False) + assert_array_almost_equal(X_scaled, X_csc_scaled.toarray()) + + # raises value error on axis != 0 + with pytest.raises(ValueError): + scale(X_csr, with_mean=False, axis=1) + + assert_array_almost_equal( + X_scaled.mean(axis=0), [0.0, -0.01, 2.24, -0.35, -0.78], 2 + ) + assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0]) + # Check that X has not been copied + assert X_scaled is not X + + X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0) + assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) + assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) + + # null scale + X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True) + assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray()) + + +def test_robust_scale_axis1(): + X = iris.data + X_trans = robust_scale(X, axis=1) + assert_array_almost_equal(np.median(X_trans, axis=1), 0) + q = np.percentile(X_trans, q=(25, 75), axis=1) + iqr = q[1] - q[0] + assert_array_almost_equal(iqr, 1) + + +def test_robust_scale_1d_array(): + X = iris.data[:, 1] + X_trans = robust_scale(X) + assert_array_almost_equal(np.median(X_trans), 0) + q = np.percentile(X_trans, q=(25, 75)) + iqr = q[1] - q[0] + assert_array_almost_equal(iqr, 1) + + +def test_robust_scaler_zero_variance_features(): + # Check RobustScaler on toy data with zero variance features + X = [[0.0, 1.0, +0.5], [0.0, 1.0, -0.1], [0.0, 1.0, +1.1]] + + scaler = RobustScaler() + X_trans = scaler.fit_transform(X) + + # NOTE: for such a small sample size, what we expect in the third column + # depends HEAVILY on the method used to calculate quantiles. The values + # here were calculated to fit the quantiles produces by np.percentile + # using numpy 1.9 Calculating quantiles with + # scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles + # would yield very different results! 
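+    # Worked example for the third column with np.percentile's default linear
+    # interpolation: median([0.5, -0.1, 1.1]) = 0.5, q25 = 0.2, q75 = 0.8, so
+    # IQR = 0.6 and the scaled values are (0.5 - 0.5) / 0.6 = 0,
+    # (-0.1 - 0.5) / 0.6 = -1 and (1.1 - 0.5) / 0.6 = +1, matching X_expected
+    # below.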
+ X_expected = [[0.0, 0.0, +0.0], [0.0, 0.0, -1.0], [0.0, 0.0, +1.0]] + assert_array_almost_equal(X_trans, X_expected) + X_trans_inv = scaler.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + + # make sure new data gets transformed correctly + X_new = [[+0.0, 2.0, 0.5], [-1.0, 1.0, 0.0], [+0.0, 1.0, 1.5]] + X_trans_new = scaler.transform(X_new) + X_expected_new = [[+0.0, 1.0, +0.0], [-1.0, 0.0, -0.83333], [+0.0, 0.0, +1.66667]] + assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3) + + +def test_robust_scaler_unit_variance(): + # Check RobustScaler with unit_variance=True on standard normal data with + # outliers + rng = np.random.RandomState(42) + X = rng.randn(1000000, 1) + X_with_outliers = np.vstack([X, np.ones((100, 1)) * 100, np.ones((100, 1)) * -100]) + + quantile_range = (1, 99) + robust_scaler = RobustScaler(quantile_range=quantile_range, unit_variance=True).fit( + X_with_outliers + ) + X_trans = robust_scaler.transform(X) + + assert robust_scaler.center_ == pytest.approx(0, abs=1e-3) + assert robust_scaler.scale_ == pytest.approx(1, abs=1e-2) + assert X_trans.std() == pytest.approx(1, abs=1e-2) + + +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +def test_maxabs_scaler_zero_variance_features(sparse_container): + # Check MaxAbsScaler on toy data with zero variance features + X = [[0.0, 1.0, +0.5], [0.0, 1.0, -0.3], [0.0, 1.0, +1.5], [0.0, 0.0, +0.0]] + + scaler = MaxAbsScaler() + X_trans = scaler.fit_transform(X) + X_expected = [ + [0.0, 1.0, 1.0 / 3.0], + [0.0, 1.0, -0.2], + [0.0, 1.0, 1.0], + [0.0, 0.0, 0.0], + ] + assert_array_almost_equal(X_trans, X_expected) + X_trans_inv = scaler.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + + # make sure new data gets transformed correctly + X_new = [[+0.0, 2.0, 0.5], [-1.0, 1.0, 0.0], [+0.0, 1.0, 1.5]] + X_trans_new = scaler.transform(X_new) + X_expected_new = [[+0.0, 2.0, 1.0 / 3.0], [-1.0, 1.0, 0.0], [+0.0, 1.0, 1.0]] + + assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2) + + # function interface + X_trans = maxabs_scale(X) + assert_array_almost_equal(X_trans, X_expected) + + # sparse data + X_sparse = sparse_container(X) + X_trans_sparse = scaler.fit_transform(X_sparse) + X_expected = [ + [0.0, 1.0, 1.0 / 3.0], + [0.0, 1.0, -0.2], + [0.0, 1.0, 1.0], + [0.0, 0.0, 0.0], + ] + assert_array_almost_equal(X_trans_sparse.toarray(), X_expected) + X_trans_sparse_inv = scaler.inverse_transform(X_trans_sparse) + assert_array_almost_equal(X, X_trans_sparse_inv.toarray()) + + +def test_maxabs_scaler_large_negative_value(): + # Check MaxAbsScaler on toy data with a large negative value + X = [ + [0.0, 1.0, +0.5, -1.0], + [0.0, 1.0, -0.3, -0.5], + [0.0, 1.0, -100.0, 0.0], + [0.0, 0.0, +0.0, -2.0], + ] + + scaler = MaxAbsScaler() + X_trans = scaler.fit_transform(X) + X_expected = [ + [0.0, 1.0, 0.005, -0.5], + [0.0, 1.0, -0.003, -0.25], + [0.0, 1.0, -1.0, 0.0], + [0.0, 0.0, 0.0, -1.0], + ] + assert_array_almost_equal(X_trans, X_expected) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_maxabs_scaler_transform_one_row_csr(csr_container): + # Check MaxAbsScaler on transforming csr matrix with one row + X = csr_container([[0.5, 1.0, 1.0]]) + scaler = MaxAbsScaler() + scaler = scaler.fit(X) + X_trans = scaler.transform(X) + X_expected = csr_container([[1.0, 1.0, 1.0]]) + assert_array_almost_equal(X_trans.toarray(), X_expected.toarray()) + X_scaled_back = scaler.inverse_transform(X_trans) + 
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray()) + + +def test_maxabs_scaler_1d(): + # Test scaling of dataset along single axis + for X in [X_1row, X_1col, X_list_1row, X_list_1row]: + scaler = MaxAbsScaler(copy=True) + X_scaled = scaler.fit(X).transform(X) + + if isinstance(X, list): + X = np.array(X) # cast only after scaling done + + if _check_dim_1axis(X) == 1: + assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), np.ones(n_features)) + else: + assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.0) + assert scaler.n_samples_seen_ == X.shape[0] + + # check inverse transform + X_scaled_back = scaler.inverse_transform(X_scaled) + assert_array_almost_equal(X_scaled_back, X) + + # Constant feature + X = np.ones((5, 1)) + scaler = MaxAbsScaler() + X_scaled = scaler.fit(X).transform(X) + assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.0) + assert scaler.n_samples_seen_ == X.shape[0] + + # function interface + X_1d = X_1row.ravel() + max_abs = np.abs(X_1d).max() + assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_maxabs_scaler_partial_fit(csr_container): + # Test if partial_fit run over many batches of size 1 and 50 + # gives the same results as fit + X = X_2d[:100, :] + n = X.shape[0] + + for chunk_size in [1, 2, 50, n, n + 42]: + # Test mean at the end of the process + scaler_batch = MaxAbsScaler().fit(X) + + scaler_incr = MaxAbsScaler() + scaler_incr_csr = MaxAbsScaler() + scaler_incr_csc = MaxAbsScaler() + for batch in gen_batches(n, chunk_size): + scaler_incr = scaler_incr.partial_fit(X[batch]) + X_csr = csr_container(X[batch]) + scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr) + X_csc = csr_container(X[batch]) + scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc) + + assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_) + assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr_csr.max_abs_) + assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr_csc.max_abs_) + assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_ + assert scaler_batch.n_samples_seen_ == scaler_incr_csr.n_samples_seen_ + assert scaler_batch.n_samples_seen_ == scaler_incr_csc.n_samples_seen_ + assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) + assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_) + assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_) + assert_array_almost_equal(scaler_batch.transform(X), scaler_incr.transform(X)) + + # Test std after 1 step + batch0 = slice(0, chunk_size) + scaler_batch = MaxAbsScaler().fit(X[batch0]) + scaler_incr = MaxAbsScaler().partial_fit(X[batch0]) + + assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_) + assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_ + assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) + assert_array_almost_equal(scaler_batch.transform(X), scaler_incr.transform(X)) + + # Test std until the end of partial fits, and + scaler_batch = MaxAbsScaler().fit(X) + scaler_incr = MaxAbsScaler() # Clean estimator + for i, batch in enumerate(gen_batches(n, chunk_size)): + scaler_incr = scaler_incr.partial_fit(X[batch]) + assert_correct_incr( + i, + batch_start=batch.start, + batch_stop=batch.stop, + n=n, + chunk_size=chunk_size, + n_samples_seen=scaler_incr.n_samples_seen_, + ) + + +def check_normalizer(norm, X_norm): + """ + Convenient checking function for `test_normalizer_l1_l2_max` and + 
`test_normalizer_l1_l2_max_non_csr` + """ + if norm == "l1": + row_sums = np.abs(X_norm).sum(axis=1) + for i in range(3): + assert_almost_equal(row_sums[i], 1.0) + assert_almost_equal(row_sums[3], 0.0) + elif norm == "l2": + for i in range(3): + assert_almost_equal(la.norm(X_norm[i]), 1.0) + assert_almost_equal(la.norm(X_norm[3]), 0.0) + elif norm == "max": + row_maxs = abs(X_norm).max(axis=1) + for i in range(3): + assert_almost_equal(row_maxs[i], 1.0) + assert_almost_equal(row_maxs[3], 0.0) + + +@pytest.mark.parametrize("norm", ["l1", "l2", "max"]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_normalizer_l1_l2_max(norm, csr_container): + rng = np.random.RandomState(0) + X_dense = rng.randn(4, 5) + X_sparse_unpruned = csr_container(X_dense) + + # set the row number 3 to zero + X_dense[3, :] = 0.0 + + # set the row number 3 to zero without pruning (can happen in real life) + indptr_3 = X_sparse_unpruned.indptr[3] + indptr_4 = X_sparse_unpruned.indptr[4] + X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 + + # build the pruned variant using the regular constructor + X_sparse_pruned = csr_container(X_dense) + + # check inputs that support the no-copy optim + for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): + normalizer = Normalizer(norm=norm, copy=True) + X_norm1 = normalizer.transform(X) + assert X_norm1 is not X + X_norm1 = toarray(X_norm1) + + normalizer = Normalizer(norm=norm, copy=False) + X_norm2 = normalizer.transform(X) + assert X_norm2 is X + X_norm2 = toarray(X_norm2) + + for X_norm in (X_norm1, X_norm2): + check_normalizer(norm, X_norm) + + +@pytest.mark.parametrize("norm", ["l1", "l2", "max"]) +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + LIL_CONTAINERS +) +def test_normalizer_l1_l2_max_non_csr(norm, sparse_container): + rng = np.random.RandomState(0) + X_dense = rng.randn(4, 5) + + # set the row number 3 to zero + X_dense[3, :] = 0.0 + + X = sparse_container(X_dense) + X_norm = Normalizer(norm=norm, copy=False).transform(X) + + assert X_norm is not X + assert sparse.issparse(X_norm) and X_norm.format == "csr" + + X_norm = toarray(X_norm) + check_normalizer(norm, X_norm) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_normalizer_max_sign(csr_container): + # check that we normalize by a positive number even for negative data + rng = np.random.RandomState(0) + X_dense = rng.randn(4, 5) + # set the row number 3 to zero + X_dense[3, :] = 0.0 + # check for mixed data where the value with + # largest magnitude is negative + X_dense[2, abs(X_dense[2, :]).argmax()] *= -1 + X_all_neg = -np.abs(X_dense) + X_all_neg_sparse = csr_container(X_all_neg) + + for X in (X_dense, X_all_neg, X_all_neg_sparse): + normalizer = Normalizer(norm="max") + X_norm = normalizer.transform(X) + assert X_norm is not X + X_norm = toarray(X_norm) + assert_array_equal(np.sign(X_norm), np.sign(toarray(X))) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_normalize(csr_container): + # Test normalize function + # Only tests functionality not used by the tests for Normalizer. 
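+    # The first check below relies on the identity that normalizing the rows
+    # of X equals normalizing the columns of X.T and transposing the result,
+    # which is what the axis handling of normalize() is expected to satisfy.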
+ X = np.random.RandomState(37).randn(3, 2) + assert_array_equal(normalize(X, copy=False), normalize(X.T, axis=0, copy=False).T) + + rs = np.random.RandomState(0) + X_dense = rs.randn(10, 5) + X_sparse = csr_container(X_dense) + ones = np.ones((10)) + for X in (X_dense, X_sparse): + for dtype in (np.float32, np.float64): + for norm in ("l1", "l2"): + X = X.astype(dtype) + X_norm = normalize(X, norm=norm) + assert X_norm.dtype == dtype + + X_norm = toarray(X_norm) + if norm == "l1": + row_sums = np.abs(X_norm).sum(axis=1) + else: + X_norm_squared = X_norm**2 + row_sums = X_norm_squared.sum(axis=1) + + assert_array_almost_equal(row_sums, ones) + + # Test return_norm + X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]]) + for norm in ("l1", "l2", "max"): + _, norms = normalize(X_dense, norm=norm, return_norm=True) + if norm == "l1": + assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0])) + elif norm == "l2": + assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127])) + else: + assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0])) + + X_sparse = csr_container(X_dense) + for norm in ("l1", "l2"): + with pytest.raises(NotImplementedError): + normalize(X_sparse, norm=norm, return_norm=True) + _, norms = normalize(X_sparse, norm="max", return_norm=True) + assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0])) + + +@pytest.mark.parametrize( + "constructor", [np.array, list] + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_binarizer(constructor): + X_ = np.array([[1, 0, 5], [2, 3, -1]]) + X = constructor(X_.copy()) + + binarizer = Binarizer(threshold=2.0, copy=True) + X_bin = toarray(binarizer.transform(X)) + assert np.sum(X_bin == 0) == 4 + assert np.sum(X_bin == 1) == 2 + X_bin = binarizer.transform(X) + assert sparse.issparse(X) == sparse.issparse(X_bin) + + binarizer = Binarizer(copy=True).fit(X) + X_bin = toarray(binarizer.transform(X)) + assert X_bin is not X + assert np.sum(X_bin == 0) == 2 + assert np.sum(X_bin == 1) == 4 + + binarizer = Binarizer(copy=True) + X_bin = binarizer.transform(X) + assert X_bin is not X + X_bin = toarray(X_bin) + assert np.sum(X_bin == 0) == 2 + assert np.sum(X_bin == 1) == 4 + + binarizer = Binarizer(copy=False) + X_bin = binarizer.transform(X) + if constructor is not list: + assert X_bin is X + + binarizer = Binarizer(copy=False) + X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64) + X_bin = binarizer.transform(X_float) + if constructor is not list: + assert X_bin is X_float + + X_bin = toarray(X_bin) + assert np.sum(X_bin == 0) == 2 + assert np.sum(X_bin == 1) == 4 + + binarizer = Binarizer(threshold=-0.5, copy=True) + if constructor in (np.array, list): + X = constructor(X_.copy()) + + X_bin = toarray(binarizer.transform(X)) + assert np.sum(X_bin == 0) == 1 + assert np.sum(X_bin == 1) == 5 + X_bin = binarizer.transform(X) + + # Cannot use threshold < 0 for sparse + if constructor in CSC_CONTAINERS: + with pytest.raises(ValueError): + binarizer.transform(constructor(X)) + + +def test_center_kernel(): + # Test that KernelCenterer is equivalent to StandardScaler + # in feature space + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, 4)) + scaler = StandardScaler(with_std=False) + scaler.fit(X_fit) + X_fit_centered = scaler.transform(X_fit) + K_fit = np.dot(X_fit, X_fit.T) + + # center fit time matrix + centerer = KernelCenterer() + K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T) + K_fit_centered2 = centerer.fit_transform(K_fit) + assert_array_almost_equal(K_fit_centered, K_fit_centered2) 
+ + # center predict time matrix + X_pred = rng.random_sample((2, 4)) + K_pred = np.dot(X_pred, X_fit.T) + X_pred_centered = scaler.transform(X_pred) + K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T) + K_pred_centered2 = centerer.transform(K_pred) + assert_array_almost_equal(K_pred_centered, K_pred_centered2) + + # check the results coherence with the method proposed in: + # B. Schölkopf, A. Smola, and K.R. Müller, + # "Nonlinear component analysis as a kernel eigenvalue problem" + # equation (B.3) + + # K_centered3 = (I - 1_M) K (I - 1_M) + # = K - 1_M K - K 1_M + 1_M K 1_M + ones_M = np.ones_like(K_fit) / K_fit.shape[0] + K_fit_centered3 = K_fit - ones_M @ K_fit - K_fit @ ones_M + ones_M @ K_fit @ ones_M + assert_allclose(K_fit_centered, K_fit_centered3) + + # K_test_centered3 = (K_test - 1'_M K)(I - 1_M) + # = K_test - 1'_M K - K_test 1_M + 1'_M K 1_M + ones_prime_M = np.ones_like(K_pred) / K_fit.shape[0] + K_pred_centered3 = ( + K_pred - ones_prime_M @ K_fit - K_pred @ ones_M + ones_prime_M @ K_fit @ ones_M + ) + assert_allclose(K_pred_centered, K_pred_centered3) + + +def test_kernelcenterer_non_linear_kernel(): + """Check kernel centering for non-linear kernel.""" + rng = np.random.RandomState(0) + X, X_test = rng.randn(100, 50), rng.randn(20, 50) + + def phi(X): + """Our mapping function phi.""" + return np.vstack( + [ + np.clip(X, a_min=0, a_max=None), + -np.clip(X, a_min=None, a_max=0), + ] + ) + + phi_X = phi(X) + phi_X_test = phi(X_test) + + # centered the projection + scaler = StandardScaler(with_std=False) + phi_X_center = scaler.fit_transform(phi_X) + phi_X_test_center = scaler.transform(phi_X_test) + + # create the different kernel + K = phi_X @ phi_X.T + K_test = phi_X_test @ phi_X.T + K_center = phi_X_center @ phi_X_center.T + K_test_center = phi_X_test_center @ phi_X_center.T + + kernel_centerer = KernelCenterer() + kernel_centerer.fit(K) + + assert_allclose(kernel_centerer.transform(K), K_center) + assert_allclose(kernel_centerer.transform(K_test), K_test_center) + + # check the results coherence with the method proposed in: + # B. Schölkopf, A. Smola, and K.R. Müller, + # "Nonlinear component analysis as a kernel eigenvalue problem" + # equation (B.3) + + # K_centered = (I - 1_M) K (I - 1_M) + # = K - 1_M K - K 1_M + 1_M K 1_M + ones_M = np.ones_like(K) / K.shape[0] + K_centered = K - ones_M @ K - K @ ones_M + ones_M @ K @ ones_M + assert_allclose(kernel_centerer.transform(K), K_centered) + + # K_test_centered = (K_test - 1'_M K)(I - 1_M) + # = K_test - 1'_M K - K_test 1_M + 1'_M K 1_M + ones_prime_M = np.ones_like(K_test) / K.shape[0] + K_test_centered = ( + K_test - ones_prime_M @ K - K_test @ ones_M + ones_prime_M @ K @ ones_M + ) + assert_allclose(kernel_centerer.transform(K_test), K_test_centered) + + +def test_cv_pipeline_precomputed(): + # Cross-validate a regression on four coplanar points with the same + # value. Use precomputed kernel to ensure Pipeline with KernelCenterer + # is treated as a pairwise operation. + X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]]) + y_true = np.ones((4,)) + K = X.dot(X.T) + kcent = KernelCenterer() + pipeline = Pipeline([("kernel_centerer", kcent), ("svr", SVR())]) + + # did the pipeline set the pairwise attribute? 
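+    # The pairwise tag matters because cross-validation must slice a
+    # precomputed kernel along both rows and columns; without it the splitter
+    # would only subset rows and the train/test kernels would have the wrong
+    # shape.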
+ assert pipeline._get_tags()["pairwise"] + + # test cross-validation, score should be almost perfect + # NB: this test is pretty vacuous -- it's mainly to test integration + # of Pipeline and KernelCenterer + y_pred = cross_val_predict(pipeline, K, y_true, cv=2) + assert_array_almost_equal(y_true, y_pred) + + +def test_fit_transform(): + rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + for obj in (StandardScaler(), Normalizer(), Binarizer()): + X_transformed = obj.fit(X).transform(X) + X_transformed2 = obj.fit_transform(X) + assert_array_equal(X_transformed, X_transformed2) + + +def test_add_dummy_feature(): + X = [[1, 0], [0, 1], [0, 1]] + X = add_dummy_feature(X) + assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) + + +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_add_dummy_feature_sparse(sparse_container): + X = sparse_container([[1, 0], [0, 1], [0, 1]]) + desired_format = X.format + X = add_dummy_feature(X) + assert sparse.issparse(X) and X.format == desired_format, X + assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) + + +def test_fit_cold_start(): + X = iris.data + X_2d = X[:, :2] + + # Scalers that have a partial_fit method + scalers = [ + StandardScaler(with_mean=False, with_std=False), + MinMaxScaler(), + MaxAbsScaler(), + ] + + for scaler in scalers: + scaler.fit_transform(X) + # with a different shape, this may break the scaler unless the internal + # state is reset + scaler.fit_transform(X_2d) + + +@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"]) +def test_power_transformer_notfitted(method): + pt = PowerTransformer(method=method) + X = np.abs(X_1col) + with pytest.raises(NotFittedError): + pt.transform(X) + with pytest.raises(NotFittedError): + pt.inverse_transform(X) + + +@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"]) +@pytest.mark.parametrize("standardize", [True, False]) +@pytest.mark.parametrize("X", [X_1col, X_2d]) +def test_power_transformer_inverse(method, standardize, X): + # Make sure we get the original input when applying transform and then + # inverse transform + X = np.abs(X) if method == "box-cox" else X + pt = PowerTransformer(method=method, standardize=standardize) + X_trans = pt.fit_transform(X) + assert_almost_equal(X, pt.inverse_transform(X_trans)) + + +def test_power_transformer_1d(): + X = np.abs(X_1col) + + for standardize in [True, False]: + pt = PowerTransformer(method="box-cox", standardize=standardize) + + X_trans = pt.fit_transform(X) + X_trans_func = power_transform(X, method="box-cox", standardize=standardize) + + X_expected, lambda_expected = stats.boxcox(X.flatten()) + + if standardize: + X_expected = scale(X_expected) + + assert_almost_equal(X_expected.reshape(-1, 1), X_trans) + assert_almost_equal(X_expected.reshape(-1, 1), X_trans_func) + + assert_almost_equal(X, pt.inverse_transform(X_trans)) + assert_almost_equal(lambda_expected, pt.lambdas_[0]) + + assert len(pt.lambdas_) == X.shape[1] + assert isinstance(pt.lambdas_, np.ndarray) + + +def test_power_transformer_2d(): + X = np.abs(X_2d) + + for standardize in [True, False]: + pt = PowerTransformer(method="box-cox", standardize=standardize) + + X_trans_class = pt.fit_transform(X) + X_trans_func = power_transform(X, method="box-cox", standardize=standardize) + + for X_trans in [X_trans_class, X_trans_func]: + for j in range(X_trans.shape[1]): + X_expected, lmbda = stats.boxcox(X[:, j].flatten()) + + if standardize: + X_expected = scale(X_expected) + + 
assert_almost_equal(X_trans[:, j], X_expected) + assert_almost_equal(lmbda, pt.lambdas_[j]) + + # Test inverse transformation + X_inv = pt.inverse_transform(X_trans) + assert_array_almost_equal(X_inv, X) + + assert len(pt.lambdas_) == X.shape[1] + assert isinstance(pt.lambdas_, np.ndarray) + + +def test_power_transformer_boxcox_strictly_positive_exception(): + # Exceptions should be raised for negative arrays and zero arrays when + # method is boxcox + + pt = PowerTransformer(method="box-cox") + pt.fit(np.abs(X_2d)) + X_with_negatives = X_2d + not_positive_message = "strictly positive" + + with pytest.raises(ValueError, match=not_positive_message): + pt.transform(X_with_negatives) + + with pytest.raises(ValueError, match=not_positive_message): + pt.fit(X_with_negatives) + + with pytest.raises(ValueError, match=not_positive_message): + power_transform(X_with_negatives, method="box-cox") + + with pytest.raises(ValueError, match=not_positive_message): + pt.transform(np.zeros(X_2d.shape)) + + with pytest.raises(ValueError, match=not_positive_message): + pt.fit(np.zeros(X_2d.shape)) + + with pytest.raises(ValueError, match=not_positive_message): + power_transform(np.zeros(X_2d.shape), method="box-cox") + + +@pytest.mark.parametrize("X", [X_2d, np.abs(X_2d), -np.abs(X_2d), np.zeros(X_2d.shape)]) +def test_power_transformer_yeojohnson_any_input(X): + # Yeo-Johnson method should support any kind of input + power_transform(X, method="yeo-johnson") + + +@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"]) +def test_power_transformer_shape_exception(method): + pt = PowerTransformer(method=method) + X = np.abs(X_2d) + pt.fit(X) + + # Exceptions should be raised for arrays with different num_columns + # than during fitting + wrong_shape_message = ( + r"X has \d+ features, but PowerTransformer is " r"expecting \d+ features" + ) + + with pytest.raises(ValueError, match=wrong_shape_message): + pt.transform(X[:, 0:1]) + + with pytest.raises(ValueError, match=wrong_shape_message): + pt.inverse_transform(X[:, 0:1]) + + +def test_power_transformer_lambda_zero(): + pt = PowerTransformer(method="box-cox", standardize=False) + X = np.abs(X_2d)[:, 0:1] + + # Test the lambda = 0 case + pt.lambdas_ = np.array([0]) + X_trans = pt.transform(X) + assert_array_almost_equal(pt.inverse_transform(X_trans), X) + + +def test_power_transformer_lambda_one(): + # Make sure lambda = 1 corresponds to the identity for yeo-johnson + pt = PowerTransformer(method="yeo-johnson", standardize=False) + X = np.abs(X_2d)[:, 0:1] + + pt.lambdas_ = np.array([1]) + X_trans = pt.transform(X) + assert_array_almost_equal(X_trans, X) + + +@pytest.mark.parametrize( + "method, lmbda", + [ + ("box-cox", 0.1), + ("box-cox", 0.5), + ("yeo-johnson", 0.1), + ("yeo-johnson", 0.5), + ("yeo-johnson", 1.0), + ], +) +def test_optimization_power_transformer(method, lmbda): + # Test the optimization procedure: + # - set a predefined value for lambda + # - apply inverse_transform to a normal dist (we get X_inv) + # - apply fit_transform to X_inv (we get X_inv_trans) + # - check that X_inv_trans is roughly equal to X + + rng = np.random.RandomState(0) + n_samples = 20000 + X = rng.normal(loc=0, scale=1, size=(n_samples, 1)) + + pt = PowerTransformer(method=method, standardize=False) + pt.lambdas_ = [lmbda] + X_inv = pt.inverse_transform(X) + + pt = PowerTransformer(method=method, standardize=False) + X_inv_trans = pt.fit_transform(X_inv) + + assert_almost_equal(0, np.linalg.norm(X - X_inv_trans) / n_samples, decimal=2) + assert_almost_equal(0, 
X_inv_trans.mean(), decimal=1) + assert_almost_equal(1, X_inv_trans.std(), decimal=1) + + +def test_yeo_johnson_darwin_example(): + # test from original paper "A new family of power transformations to + # improve normality or symmetry" by Yeo and Johnson. + X = [6.1, -8.4, 1.0, 2.0, 0.7, 2.9, 3.5, 5.1, 1.8, 3.6, 7.0, 3.0, 9.3, 7.5, -6.0] + X = np.array(X).reshape(-1, 1) + lmbda = PowerTransformer(method="yeo-johnson").fit(X).lambdas_ + assert np.allclose(lmbda, 1.305, atol=1e-3) + + +@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"]) +def test_power_transformer_nans(method): + # Make sure lambda estimation is not influenced by NaN values + # and that transform() supports NaN silently + + X = np.abs(X_1col) + pt = PowerTransformer(method=method) + pt.fit(X) + lmbda_no_nans = pt.lambdas_[0] + + # concat nans at the end and check lambda stays the same + X = np.concatenate([X, np.full_like(X, np.nan)]) + X = shuffle(X, random_state=0) + + pt.fit(X) + lmbda_nans = pt.lambdas_[0] + + assert_almost_equal(lmbda_no_nans, lmbda_nans, decimal=5) + + X_trans = pt.transform(X) + assert_array_equal(np.isnan(X_trans), np.isnan(X)) + + +@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"]) +@pytest.mark.parametrize("standardize", [True, False]) +def test_power_transformer_fit_transform(method, standardize): + # check that fit_transform() and fit().transform() return the same values + X = X_1col + if method == "box-cox": + X = np.abs(X) + + pt = PowerTransformer(method, standardize=standardize) + assert_array_almost_equal(pt.fit(X).transform(X), pt.fit_transform(X)) + + +@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"]) +@pytest.mark.parametrize("standardize", [True, False]) +def test_power_transformer_copy_True(method, standardize): + # Check that neither fit, transform, fit_transform nor inverse_transform + # modify X inplace when copy=True + X = X_1col + if method == "box-cox": + X = np.abs(X) + + X_original = X.copy() + assert X is not X_original # sanity checks + assert_array_almost_equal(X, X_original) + + pt = PowerTransformer(method, standardize=standardize, copy=True) + + pt.fit(X) + assert_array_almost_equal(X, X_original) + X_trans = pt.transform(X) + assert X_trans is not X + + X_trans = pt.fit_transform(X) + assert_array_almost_equal(X, X_original) + assert X_trans is not X + + X_inv_trans = pt.inverse_transform(X_trans) + assert X_trans is not X_inv_trans + + +@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"]) +@pytest.mark.parametrize("standardize", [True, False]) +def test_power_transformer_copy_False(method, standardize): + # check that when copy=False fit doesn't change X inplace but transform, + # fit_transform and inverse_transform do. + X = X_1col + if method == "box-cox": + X = np.abs(X) + + X_original = X.copy() + assert X is not X_original # sanity checks + assert_array_almost_equal(X, X_original) + + pt = PowerTransformer(method, standardize=standardize, copy=False) + + pt.fit(X) + assert_array_almost_equal(X, X_original) # fit didn't change X + + X_trans = pt.transform(X) + assert X_trans is X + + if method == "box-cox": + X = np.abs(X) + X_trans = pt.fit_transform(X) + assert X_trans is X + + X_inv_trans = pt.inverse_transform(X_trans) + assert X_trans is X_inv_trans + + +def test_power_transformer_box_cox_raise_all_nans_col(): + """Check that box-cox raises informative when a column contains all nans. 
+ + Non-regression test for gh-26303 + """ + X = rng.random_sample((4, 5)) + X[:, 0] = np.nan + + err_msg = "Column must not be all nan." + + pt = PowerTransformer(method="box-cox") + with pytest.raises(ValueError, match=err_msg): + pt.fit_transform(X) + + +@pytest.mark.parametrize( + "X_2", + [sparse.random(10, 1, density=0.8, random_state=0)] + + [ + csr_container(np.full((10, 1), fill_value=np.nan)) + for csr_container in CSR_CONTAINERS + ], +) +def test_standard_scaler_sparse_partial_fit_finite_variance(X_2): + # non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/16448 + X_1 = sparse.random(5, 1, density=0.8) + scaler = StandardScaler(with_mean=False) + scaler.fit(X_1).partial_fit(X_2) + assert np.isfinite(scaler.var_[0]) + + +@pytest.mark.parametrize("feature_range", [(0, 1), (-10, 10)]) +def test_minmax_scaler_clip(feature_range): + # test behaviour of the parameter 'clip' in MinMaxScaler + X = iris.data + scaler = MinMaxScaler(feature_range=feature_range, clip=True).fit(X) + X_min, X_max = np.min(X, axis=0), np.max(X, axis=0) + X_test = [np.r_[X_min[:2] - 10, X_max[2:] + 10]] + X_transformed = scaler.transform(X_test) + assert_allclose( + X_transformed, + [[feature_range[0], feature_range[0], feature_range[1], feature_range[1]]], + ) + + +def test_standard_scaler_raise_error_for_1d_input(): + """Check that `inverse_transform` from `StandardScaler` raises an error + with 1D array. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19518 + """ + scaler = StandardScaler().fit(X_2d) + err_msg = "Expected 2D array, got 1D array instead" + with pytest.raises(ValueError, match=err_msg): + scaler.inverse_transform(X_2d[:, 0]) + + +def test_power_transformer_significantly_non_gaussian(): + """Check that significantly non-Gaussian data before transforms correctly. + + For some explored lambdas, the transformed data may be constant and will + be rejected. 
Non-regression test for + https://github.com/scikit-learn/scikit-learn/issues/14959 + """ + + X_non_gaussian = 1e6 * np.array( + [0.6, 2.0, 3.0, 4.0] * 4 + [11, 12, 12, 16, 17, 20, 85, 90], dtype=np.float64 + ).reshape(-1, 1) + pt = PowerTransformer() + + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + X_trans = pt.fit_transform(X_non_gaussian) + + assert not np.any(np.isnan(X_trans)) + assert X_trans.mean() == pytest.approx(0.0) + assert X_trans.std() == pytest.approx(1.0) + assert X_trans.min() > -2 + assert X_trans.max() < 2 + + +@pytest.mark.parametrize( + "Transformer", + [ + MinMaxScaler, + MaxAbsScaler, + RobustScaler, + StandardScaler, + QuantileTransformer, + PowerTransformer, + ], +) +def test_one_to_one_features(Transformer): + """Check one-to-one transformers give correct feature names.""" + tr = Transformer().fit(iris.data) + names_out = tr.get_feature_names_out(iris.feature_names) + assert_array_equal(names_out, iris.feature_names) + + +@pytest.mark.parametrize( + "Transformer", + [ + MinMaxScaler, + MaxAbsScaler, + RobustScaler, + StandardScaler, + QuantileTransformer, + PowerTransformer, + Normalizer, + Binarizer, + ], +) +def test_one_to_one_features_pandas(Transformer): + """Check one-to-one transformers give correct feature names.""" + pd = pytest.importorskip("pandas") + + df = pd.DataFrame(iris.data, columns=iris.feature_names) + tr = Transformer().fit(df) + + names_out_df_default = tr.get_feature_names_out() + assert_array_equal(names_out_df_default, iris.feature_names) + + names_out_df_valid_in = tr.get_feature_names_out(iris.feature_names) + assert_array_equal(names_out_df_valid_in, iris.feature_names) + + msg = re.escape("input_features is not equal to feature_names_in_") + with pytest.raises(ValueError, match=msg): + invalid_names = list("abcd") + tr.get_feature_names_out(invalid_names) + + +def test_kernel_centerer_feature_names_out(): + """Test that kernel centerer `feature_names_out`.""" + + rng = np.random.RandomState(0) + X = rng.random_sample((6, 4)) + X_pairwise = linear_kernel(X) + centerer = KernelCenterer().fit(X_pairwise) + + names_out = centerer.get_feature_names_out() + samples_out2 = X_pairwise.shape[1] + assert_array_equal(names_out, [f"kernelcenterer{i}" for i in range(samples_out2)]) + + +@pytest.mark.parametrize("standardize", [True, False]) +def test_power_transformer_constant_feature(standardize): + """Check that PowerTransfomer leaves constant features unchanged.""" + X = [[-2, 0, 2], [-2, 0, 2], [-2, 0, 2]] + + pt = PowerTransformer(method="yeo-johnson", standardize=standardize).fit(X) + + assert_allclose(pt.lambdas_, [1, 1, 1]) + + Xft = pt.fit_transform(X) + Xt = pt.transform(X) + + for Xt_ in [Xft, Xt]: + if standardize: + assert_allclose(Xt_, np.zeros_like(X)) + else: + assert_allclose(Xt_, X) diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_function_transformer.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_function_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..e7b86e88d1547cb296d89687f2179ab850349dd5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_function_transformer.py @@ -0,0 +1,591 @@ +import warnings + +import numpy as np +import pytest + +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import FunctionTransformer, StandardScaler +from sklearn.preprocessing._function_transformer import _get_adapter_from_container +from sklearn.utils._testing 
import ( + _convert_container, + assert_allclose_dense_sparse, + assert_array_equal, +) +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + + +def test_get_adapter_from_container(): + """Check the behavior fo `_get_adapter_from_container`.""" + pd = pytest.importorskip("pandas") + X = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]}) + adapter = _get_adapter_from_container(X) + assert adapter.container_lib == "pandas" + err_msg = "The container does not have a registered adapter in scikit-learn." + with pytest.raises(ValueError, match=err_msg): + _get_adapter_from_container(X.to_numpy()) + + +def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X): + def _func(X, *args, **kwargs): + args_store.append(X) + args_store.extend(args) + kwargs_store.update(kwargs) + return func(X) + + return _func + + +def test_delegate_to_func(): + # (args|kwargs)_store will hold the positional and keyword arguments + # passed to the function inside the FunctionTransformer. + args_store = [] + kwargs_store = {} + X = np.arange(10).reshape((5, 2)) + assert_array_equal( + FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X), + X, + "transform should have returned X unchanged", + ) + + # The function should only have received X. + assert args_store == [ + X + ], "Incorrect positional arguments passed to func: {args}".format(args=args_store) + + assert ( + not kwargs_store + ), "Unexpected keyword arguments passed to func: {args}".format(args=kwargs_store) + + # reset the argument stores. + args_store[:] = [] + kwargs_store.clear() + transformed = FunctionTransformer( + _make_func(args_store, kwargs_store), + ).transform(X) + + assert_array_equal( + transformed, X, err_msg="transform should have returned X unchanged" + ) + + # The function should have received X + assert args_store == [ + X + ], "Incorrect positional arguments passed to func: {args}".format(args=args_store) + + assert ( + not kwargs_store + ), "Unexpected keyword arguments passed to func: {args}".format(args=kwargs_store) + + +def test_np_log(): + X = np.arange(10).reshape((5, 2)) + + # Test that the numpy.log example still works. 
+ assert_array_equal( + FunctionTransformer(np.log1p).transform(X), + np.log1p(X), + ) + + +def test_kw_arg(): + X = np.linspace(0, 1, num=10).reshape((5, 2)) + + F = FunctionTransformer(np.around, kw_args=dict(decimals=3)) + + # Test that rounding is correct + assert_array_equal(F.transform(X), np.around(X, decimals=3)) + + +def test_kw_arg_update(): + X = np.linspace(0, 1, num=10).reshape((5, 2)) + + F = FunctionTransformer(np.around, kw_args=dict(decimals=3)) + + F.kw_args["decimals"] = 1 + + # Test that rounding is correct + assert_array_equal(F.transform(X), np.around(X, decimals=1)) + + +def test_kw_arg_reset(): + X = np.linspace(0, 1, num=10).reshape((5, 2)) + + F = FunctionTransformer(np.around, kw_args=dict(decimals=3)) + + F.kw_args = dict(decimals=1) + + # Test that rounding is correct + assert_array_equal(F.transform(X), np.around(X, decimals=1)) + + +def test_inverse_transform(): + X = np.array([1, 4, 9, 16]).reshape((2, 2)) + + # Test that inverse_transform works correctly + F = FunctionTransformer( + func=np.sqrt, + inverse_func=np.around, + inv_kw_args=dict(decimals=3), + ) + assert_array_equal( + F.inverse_transform(F.transform(X)), + np.around(np.sqrt(X), decimals=3), + ) + + +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS) +def test_check_inverse(sparse_container): + X = np.array([1, 4, 9, 16], dtype=np.float64).reshape((2, 2)) + if sparse_container is not None: + X = sparse_container(X) + + trans = FunctionTransformer( + func=np.sqrt, + inverse_func=np.around, + accept_sparse=sparse_container is not None, + check_inverse=True, + validate=True, + ) + warning_message = ( + "The provided functions are not strictly" + " inverse of each other. If you are sure you" + " want to proceed regardless, set" + " 'check_inverse=False'." + ) + with pytest.warns(UserWarning, match=warning_message): + trans.fit(X) + + trans = FunctionTransformer( + func=np.expm1, + inverse_func=np.log1p, + accept_sparse=sparse_container is not None, + check_inverse=True, + validate=True, + ) + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + Xt = trans.fit_transform(X) + + assert_allclose_dense_sparse(X, trans.inverse_transform(Xt)) + + +def test_check_inverse_func_or_inverse_not_provided(): + # check that we don't check inverse when one of the func or inverse is not + # provided. 
+ X = np.array([1, 4, 9, 16], dtype=np.float64).reshape((2, 2)) + + trans = FunctionTransformer( + func=np.expm1, inverse_func=None, check_inverse=True, validate=True + ) + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + trans.fit(X) + trans = FunctionTransformer( + func=None, inverse_func=np.expm1, check_inverse=True, validate=True + ) + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + trans.fit(X) + + +def test_function_transformer_frame(): + pd = pytest.importorskip("pandas") + X_df = pd.DataFrame(np.random.randn(100, 10)) + transformer = FunctionTransformer() + X_df_trans = transformer.fit_transform(X_df) + assert hasattr(X_df_trans, "loc") + + +@pytest.mark.parametrize("X_type", ["array", "series"]) +def test_function_transformer_raise_error_with_mixed_dtype(X_type): + """Check that `FunctionTransformer.check_inverse` raises error on mixed dtype.""" + mapping = {"one": 1, "two": 2, "three": 3, 5: "five", 6: "six"} + inverse_mapping = {value: key for key, value in mapping.items()} + dtype = "object" + + data = ["one", "two", "three", "one", "one", 5, 6] + data = _convert_container(data, X_type, columns_name=["value"], dtype=dtype) + + def func(X): + return np.array([mapping[X[i]] for i in range(X.size)], dtype=object) + + def inverse_func(X): + return _convert_container( + [inverse_mapping[x] for x in X], + X_type, + columns_name=["value"], + dtype=dtype, + ) + + transformer = FunctionTransformer( + func=func, inverse_func=inverse_func, validate=False, check_inverse=True + ) + + msg = "'check_inverse' is only supported when all the elements in `X` is numerical." + with pytest.raises(ValueError, match=msg): + transformer.fit(data) + + +def test_function_transformer_support_all_nummerical_dataframes_check_inverse_True(): + """Check support for dataframes with only numerical values.""" + pd = pytest.importorskip("pandas") + + df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + transformer = FunctionTransformer( + func=lambda x: x + 2, inverse_func=lambda x: x - 2, check_inverse=True + ) + + # Does not raise an error + df_out = transformer.fit_transform(df) + assert_allclose_dense_sparse(df_out, df + 2) + + +def test_function_transformer_with_dataframe_and_check_inverse_True(): + """Check error is raised when check_inverse=True. + + Non-regresion test for gh-25261. + """ + pd = pytest.importorskip("pandas") + transformer = FunctionTransformer( + func=lambda x: x, inverse_func=lambda x: x, check_inverse=True + ) + + df_mixed = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}) + msg = "'check_inverse' is only supported when all the elements in `X` is numerical." 
+ with pytest.raises(ValueError, match=msg): + transformer.fit(df_mixed) + + +@pytest.mark.parametrize( + "X, feature_names_out, input_features, expected", + [ + ( + # NumPy inputs, default behavior: generate names + np.random.rand(100, 3), + "one-to-one", + None, + ("x0", "x1", "x2"), + ), + ( + # Pandas input, default behavior: use input feature names + {"a": np.random.rand(100), "b": np.random.rand(100)}, + "one-to-one", + None, + ("a", "b"), + ), + ( + # NumPy input, feature_names_out=callable + np.random.rand(100, 3), + lambda transformer, input_features: ("a", "b"), + None, + ("a", "b"), + ), + ( + # Pandas input, feature_names_out=callable + {"a": np.random.rand(100), "b": np.random.rand(100)}, + lambda transformer, input_features: ("c", "d", "e"), + None, + ("c", "d", "e"), + ), + ( + # NumPy input, feature_names_out=callable – default input_features + np.random.rand(100, 3), + lambda transformer, input_features: tuple(input_features) + ("a",), + None, + ("x0", "x1", "x2", "a"), + ), + ( + # Pandas input, feature_names_out=callable – default input_features + {"a": np.random.rand(100), "b": np.random.rand(100)}, + lambda transformer, input_features: tuple(input_features) + ("c",), + None, + ("a", "b", "c"), + ), + ( + # NumPy input, input_features=list of names + np.random.rand(100, 3), + "one-to-one", + ("a", "b", "c"), + ("a", "b", "c"), + ), + ( + # Pandas input, input_features=list of names + {"a": np.random.rand(100), "b": np.random.rand(100)}, + "one-to-one", + ("a", "b"), # must match feature_names_in_ + ("a", "b"), + ), + ( + # NumPy input, feature_names_out=callable, input_features=list + np.random.rand(100, 3), + lambda transformer, input_features: tuple(input_features) + ("d",), + ("a", "b", "c"), + ("a", "b", "c", "d"), + ), + ( + # Pandas input, feature_names_out=callable, input_features=list + {"a": np.random.rand(100), "b": np.random.rand(100)}, + lambda transformer, input_features: tuple(input_features) + ("c",), + ("a", "b"), # must match feature_names_in_ + ("a", "b", "c"), + ), + ], +) +@pytest.mark.parametrize("validate", [True, False]) +def test_function_transformer_get_feature_names_out( + X, feature_names_out, input_features, expected, validate +): + if isinstance(X, dict): + pd = pytest.importorskip("pandas") + X = pd.DataFrame(X) + + transformer = FunctionTransformer( + feature_names_out=feature_names_out, validate=validate + ) + transformer.fit(X) + names = transformer.get_feature_names_out(input_features) + assert isinstance(names, np.ndarray) + assert names.dtype == object + assert_array_equal(names, expected) + + +def test_function_transformer_get_feature_names_out_without_validation(): + transformer = FunctionTransformer(feature_names_out="one-to-one", validate=False) + X = np.random.rand(100, 2) + transformer.fit_transform(X) + + names = transformer.get_feature_names_out(("a", "b")) + assert isinstance(names, np.ndarray) + assert names.dtype == object + assert_array_equal(names, ("a", "b")) + + +def test_function_transformer_feature_names_out_is_None(): + transformer = FunctionTransformer() + X = np.random.rand(100, 2) + transformer.fit_transform(X) + + msg = "This 'FunctionTransformer' has no attribute 'get_feature_names_out'" + with pytest.raises(AttributeError, match=msg): + transformer.get_feature_names_out() + + +def test_function_transformer_feature_names_out_uses_estimator(): + def add_n_random_features(X, n): + return np.concatenate([X, np.random.rand(len(X), n)], axis=1) + + def feature_names_out(transformer, input_features): + n = 
transformer.kw_args["n"] + return list(input_features) + [f"rnd{i}" for i in range(n)] + + transformer = FunctionTransformer( + func=add_n_random_features, + feature_names_out=feature_names_out, + kw_args=dict(n=3), + validate=True, + ) + pd = pytest.importorskip("pandas") + df = pd.DataFrame({"a": np.random.rand(100), "b": np.random.rand(100)}) + transformer.fit_transform(df) + names = transformer.get_feature_names_out() + + assert isinstance(names, np.ndarray) + assert names.dtype == object + assert_array_equal(names, ("a", "b", "rnd0", "rnd1", "rnd2")) + + +def test_function_transformer_validate_inverse(): + """Test that function transformer does not reset estimator in + `inverse_transform`.""" + + def add_constant_feature(X): + X_one = np.ones((X.shape[0], 1)) + return np.concatenate((X, X_one), axis=1) + + def inverse_add_constant(X): + return X[:, :-1] + + X = np.array([[1, 2], [3, 4], [3, 4]]) + trans = FunctionTransformer( + func=add_constant_feature, + inverse_func=inverse_add_constant, + validate=True, + ) + X_trans = trans.fit_transform(X) + assert trans.n_features_in_ == X.shape[1] + + trans.inverse_transform(X_trans) + assert trans.n_features_in_ == X.shape[1] + + +@pytest.mark.parametrize( + "feature_names_out, expected", + [ + ("one-to-one", ["pet", "color"]), + [lambda est, names: [f"{n}_out" for n in names], ["pet_out", "color_out"]], + ], +) +@pytest.mark.parametrize("in_pipeline", [True, False]) +def test_get_feature_names_out_dataframe_with_string_data( + feature_names_out, expected, in_pipeline +): + """Check that get_feature_names_out works with DataFrames with string data.""" + pd = pytest.importorskip("pandas") + X = pd.DataFrame({"pet": ["dog", "cat"], "color": ["red", "green"]}) + + def func(X): + if feature_names_out == "one-to-one": + return X + else: + name = feature_names_out(None, X.columns) + return X.rename(columns=dict(zip(X.columns, name))) + + transformer = FunctionTransformer(func=func, feature_names_out=feature_names_out) + if in_pipeline: + transformer = make_pipeline(transformer) + + X_trans = transformer.fit_transform(X) + assert isinstance(X_trans, pd.DataFrame) + + names = transformer.get_feature_names_out() + assert isinstance(names, np.ndarray) + assert names.dtype == object + assert_array_equal(names, expected) + + +def test_set_output_func(): + """Check behavior of set_output with different settings.""" + pd = pytest.importorskip("pandas") + + X = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]}) + + ft = FunctionTransformer(np.log, feature_names_out="one-to-one") + + # no warning is raised when feature_names_out is defined + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + ft.set_output(transform="pandas") + + X_trans = ft.fit_transform(X) + assert isinstance(X_trans, pd.DataFrame) + assert_array_equal(X_trans.columns, ["a", "b"]) + + ft = FunctionTransformer(lambda x: 2 * x) + ft.set_output(transform="pandas") + + # no warning is raised when func returns a panda dataframe + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + X_trans = ft.fit_transform(X) + assert isinstance(X_trans, pd.DataFrame) + assert_array_equal(X_trans.columns, ["a", "b"]) + + # Warning is raised when func returns a ndarray + ft_np = FunctionTransformer(lambda x: np.asarray(x)) + + for transform in ("pandas", "polars"): + ft_np.set_output(transform=transform) + msg = ( + f"When `set_output` is configured to be '{transform}'.*{transform} " + "DataFrame.*" + ) + with pytest.warns(UserWarning, match=msg): + 
ft_np.fit_transform(X) + + # default transform does not warn + ft_np.set_output(transform="default") + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + ft_np.fit_transform(X) + + +def test_consistence_column_name_between_steps(): + """Check that we have a consistence between the feature names out of + `FunctionTransformer` and the feature names in of the next step in the pipeline. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27695 + """ + pd = pytest.importorskip("pandas") + + def with_suffix(_, names): + return [name + "__log" for name in names] + + pipeline = make_pipeline( + FunctionTransformer(np.log1p, feature_names_out=with_suffix), StandardScaler() + ) + + df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=["a", "b"]) + X_trans = pipeline.fit_transform(df) + assert pipeline.get_feature_names_out().tolist() == ["a__log", "b__log"] + # StandardScaler will convert to a numpy array + assert isinstance(X_trans, np.ndarray) + + +@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"]) +@pytest.mark.parametrize("transform_output", ["default", "pandas", "polars"]) +def test_function_transformer_overwrite_column_names(dataframe_lib, transform_output): + """Check that we overwrite the column names when we should.""" + lib = pytest.importorskip(dataframe_lib) + if transform_output != "numpy": + pytest.importorskip(transform_output) + + df = lib.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]}) + + def with_suffix(_, names): + return [name + "__log" for name in names] + + transformer = FunctionTransformer(feature_names_out=with_suffix).set_output( + transform=transform_output + ) + X_trans = transformer.fit_transform(df) + assert_array_equal(np.asarray(X_trans), np.asarray(df)) + + feature_names = transformer.get_feature_names_out() + assert list(X_trans.columns) == with_suffix(None, df.columns) + assert feature_names.tolist() == with_suffix(None, df.columns) + + +@pytest.mark.parametrize( + "feature_names_out", + ["one-to-one", lambda _, names: [f"{name}_log" for name in names]], +) +def test_function_transformer_overwrite_column_names_numerical(feature_names_out): + """Check the same as `test_function_transformer_overwrite_column_names` + but for the specific case of pandas where column names can be numerical.""" + pd = pytest.importorskip("pandas") + + df = pd.DataFrame({0: [1, 2, 3], 1: [10, 20, 100]}) + + transformer = FunctionTransformer(feature_names_out=feature_names_out) + X_trans = transformer.fit_transform(df) + assert_array_equal(np.asarray(X_trans), np.asarray(df)) + + feature_names = transformer.get_feature_names_out() + assert list(X_trans.columns) == list(feature_names) + + +@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"]) +@pytest.mark.parametrize( + "feature_names_out", + ["one-to-one", lambda _, names: [f"{name}_log" for name in names]], +) +def test_function_transformer_error_column_inconsistent( + dataframe_lib, feature_names_out +): + """Check that we raise an error when `func` returns a dataframe with new + column names that become inconsistent with `get_feature_names_out`.""" + lib = pytest.importorskip(dataframe_lib) + + df = lib.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]}) + + def func(df): + if dataframe_lib == "pandas": + return df.rename(columns={"a": "c"}) + else: + return df.rename({"a": "c"}) + + transformer = FunctionTransformer(func=func, feature_names_out=feature_names_out) + err_msg = "The output generated by `func` have different column names" + with 
pytest.raises(ValueError, match=err_msg): + transformer.fit_transform(df).columns diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_label.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_label.py new file mode 100644 index 0000000000000000000000000000000000000000..cce0ddc5c267eb77ef85b64e5257080d75d09449 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_label.py @@ -0,0 +1,699 @@ +import numpy as np +import pytest +from scipy.sparse import issparse + +from sklearn import datasets +from sklearn.preprocessing._label import ( + LabelBinarizer, + LabelEncoder, + MultiLabelBinarizer, + _inverse_binarize_multiclass, + _inverse_binarize_thresholding, + label_binarize, +) +from sklearn.utils import _to_object_array +from sklearn.utils._testing import assert_array_equal, ignore_warnings +from sklearn.utils.fixes import ( + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + DOK_CONTAINERS, + LIL_CONTAINERS, +) +from sklearn.utils.multiclass import type_of_target + +iris = datasets.load_iris() + + +def toarray(a): + if hasattr(a, "toarray"): + a = a.toarray() + return a + + +def test_label_binarizer(): + # one-class case defaults to negative label + # For dense case: + inp = ["pos", "pos", "pos", "pos"] + lb = LabelBinarizer(sparse_output=False) + expected = np.array([[0, 0, 0, 0]]).T + got = lb.fit_transform(inp) + assert_array_equal(lb.classes_, ["pos"]) + assert_array_equal(expected, got) + assert_array_equal(lb.inverse_transform(got), inp) + + # For sparse case: + lb = LabelBinarizer(sparse_output=True) + got = lb.fit_transform(inp) + assert issparse(got) + assert_array_equal(lb.classes_, ["pos"]) + assert_array_equal(expected, got.toarray()) + assert_array_equal(lb.inverse_transform(got.toarray()), inp) + + lb = LabelBinarizer(sparse_output=False) + # two-class case + inp = ["neg", "pos", "pos", "neg"] + expected = np.array([[0, 1, 1, 0]]).T + got = lb.fit_transform(inp) + assert_array_equal(lb.classes_, ["neg", "pos"]) + assert_array_equal(expected, got) + + to_invert = np.array([[1, 0], [0, 1], [0, 1], [1, 0]]) + assert_array_equal(lb.inverse_transform(to_invert), inp) + + # multi-class case + inp = ["spam", "ham", "eggs", "ham", "0"] + expected = np.array( + [[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]] + ) + got = lb.fit_transform(inp) + assert_array_equal(lb.classes_, ["0", "eggs", "ham", "spam"]) + assert_array_equal(expected, got) + assert_array_equal(lb.inverse_transform(got), inp) + + +def test_label_binarizer_unseen_labels(): + lb = LabelBinarizer() + + expected = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + got = lb.fit_transform(["b", "d", "e"]) + assert_array_equal(expected, got) + + expected = np.array( + [[0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]] + ) + got = lb.transform(["a", "b", "c", "d", "e", "f"]) + assert_array_equal(expected, got) + + +def test_label_binarizer_set_label_encoding(): + lb = LabelBinarizer(neg_label=-2, pos_label=0) + + # two-class case with pos_label=0 + inp = np.array([0, 1, 1, 0]) + expected = np.array([[-2, 0, 0, -2]]).T + got = lb.fit_transform(inp) + assert_array_equal(expected, got) + assert_array_equal(lb.inverse_transform(got), inp) + + lb = LabelBinarizer(neg_label=-2, pos_label=2) + + # multi-class case + inp = np.array([3, 2, 1, 2, 0]) + expected = np.array( + [ + [-2, -2, -2, +2], + [-2, -2, +2, -2], + [-2, +2, -2, -2], + [-2, -2, +2, -2], + [+2, -2, -2, -2], + ] + ) + got = lb.fit_transform(inp) + 
assert_array_equal(expected, got) + assert_array_equal(lb.inverse_transform(got), inp) + + +@pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"]) +@pytest.mark.parametrize("unique_first", [True, False]) +def test_label_binarizer_pandas_nullable(dtype, unique_first): + """Checks that LabelBinarizer works with pandas nullable dtypes. + + Non-regression test for gh-25637. + """ + pd = pytest.importorskip("pandas") + + y_true = pd.Series([1, 0, 0, 1, 0, 1, 1, 0, 1], dtype=dtype) + if unique_first: + # Calling unique creates a pandas array which has a different interface + # compared to a pandas Series. Specifically, pandas arrays do not have "iloc". + y_true = y_true.unique() + lb = LabelBinarizer().fit(y_true) + y_out = lb.transform([1, 0]) + + assert_array_equal(y_out, [[1], [0]]) + + +@ignore_warnings +def test_label_binarizer_errors(): + # Check that invalid arguments yield ValueError + one_class = np.array([0, 0, 0, 0]) + lb = LabelBinarizer().fit(one_class) + + multi_label = [(2, 3), (0,), (0, 2)] + err_msg = "You appear to be using a legacy multi-label data representation." + with pytest.raises(ValueError, match=err_msg): + lb.transform(multi_label) + + lb = LabelBinarizer() + err_msg = "This LabelBinarizer instance is not fitted yet" + with pytest.raises(ValueError, match=err_msg): + lb.transform([]) + with pytest.raises(ValueError, match=err_msg): + lb.inverse_transform([]) + + input_labels = [0, 1, 0, 1] + err_msg = "neg_label=2 must be strictly less than pos_label=1." + lb = LabelBinarizer(neg_label=2, pos_label=1) + with pytest.raises(ValueError, match=err_msg): + lb.fit(input_labels) + err_msg = "neg_label=2 must be strictly less than pos_label=2." + lb = LabelBinarizer(neg_label=2, pos_label=2) + with pytest.raises(ValueError, match=err_msg): + lb.fit(input_labels) + err_msg = ( + "Sparse binarization is only supported with non zero pos_label and zero " + "neg_label, got pos_label=2 and neg_label=1" + ) + lb = LabelBinarizer(neg_label=1, pos_label=2, sparse_output=True) + with pytest.raises(ValueError, match=err_msg): + lb.fit(input_labels) + + # Sequence of seq type should raise ValueError + y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]] + err_msg = "You appear to be using a legacy multi-label data representation" + with pytest.raises(ValueError, match=err_msg): + LabelBinarizer().fit_transform(y_seq_of_seqs) + + # Fail on the dimension of 'binary' + err_msg = "output_type='binary', but y.shape" + with pytest.raises(ValueError, match=err_msg): + _inverse_binarize_thresholding( + y=np.array([[1, 2, 3], [2, 1, 3]]), + output_type="binary", + classes=[1, 2, 3], + threshold=0, + ) + + # Fail on multioutput data + err_msg = "Multioutput target data is not supported with label binarization" + with pytest.raises(ValueError, match=err_msg): + LabelBinarizer().fit(np.array([[1, 3], [2, 1]])) + with pytest.raises(ValueError, match=err_msg): + label_binarize(np.array([[1, 3], [2, 1]]), classes=[1, 2, 3]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_label_binarizer_sparse_errors(csr_container): + # Fail on y_type + err_msg = "foo format is not supported" + with pytest.raises(ValueError, match=err_msg): + _inverse_binarize_thresholding( + y=csr_container([[1, 2], [2, 1]]), + output_type="foo", + classes=[1, 2], + threshold=0, + ) + + # Fail on the number of classes + err_msg = "The number of class is not equal to the number of dimension of y." 
+ with pytest.raises(ValueError, match=err_msg): + _inverse_binarize_thresholding( + y=csr_container([[1, 2], [2, 1]]), + output_type="foo", + classes=[1, 2, 3], + threshold=0, + ) + + +@pytest.mark.parametrize( + "values, classes, unknown", + [ + ( + np.array([2, 1, 3, 1, 3], dtype="int64"), + np.array([1, 2, 3], dtype="int64"), + np.array([4], dtype="int64"), + ), + ( + np.array(["b", "a", "c", "a", "c"], dtype=object), + np.array(["a", "b", "c"], dtype=object), + np.array(["d"], dtype=object), + ), + ( + np.array(["b", "a", "c", "a", "c"]), + np.array(["a", "b", "c"]), + np.array(["d"]), + ), + ], + ids=["int64", "object", "str"], +) +def test_label_encoder(values, classes, unknown): + # Test LabelEncoder's transform, fit_transform and + # inverse_transform methods + le = LabelEncoder() + le.fit(values) + assert_array_equal(le.classes_, classes) + assert_array_equal(le.transform(values), [1, 0, 2, 0, 2]) + assert_array_equal(le.inverse_transform([1, 0, 2, 0, 2]), values) + le = LabelEncoder() + ret = le.fit_transform(values) + assert_array_equal(ret, [1, 0, 2, 0, 2]) + + with pytest.raises(ValueError, match="unseen labels"): + le.transform(unknown) + + +def test_label_encoder_negative_ints(): + le = LabelEncoder() + le.fit([1, 1, 4, 5, -1, 0]) + assert_array_equal(le.classes_, [-1, 0, 1, 4, 5]) + assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]), [1, 2, 3, 3, 4, 0, 0]) + assert_array_equal( + le.inverse_transform([1, 2, 3, 3, 4, 0, 0]), [0, 1, 4, 4, 5, -1, -1] + ) + with pytest.raises(ValueError): + le.transform([0, 6]) + + +@pytest.mark.parametrize("dtype", ["str", "object"]) +def test_label_encoder_str_bad_shape(dtype): + le = LabelEncoder() + le.fit(np.array(["apple", "orange"], dtype=dtype)) + msg = "should be a 1d array" + with pytest.raises(ValueError, match=msg): + le.transform("apple") + + +def test_label_encoder_errors(): + # Check that invalid arguments yield ValueError + le = LabelEncoder() + with pytest.raises(ValueError): + le.transform([]) + with pytest.raises(ValueError): + le.inverse_transform([]) + + # Fail on unseen labels + le = LabelEncoder() + le.fit([1, 2, 3, -1, 1]) + msg = "contains previously unseen labels" + with pytest.raises(ValueError, match=msg): + le.inverse_transform([-2]) + with pytest.raises(ValueError, match=msg): + le.inverse_transform([-2, -3, -4]) + + # Fail on inverse_transform("") + msg = r"should be a 1d array.+shape \(\)" + with pytest.raises(ValueError, match=msg): + le.inverse_transform("") + + +@pytest.mark.parametrize( + "values", + [ + np.array([2, 1, 3, 1, 3], dtype="int64"), + np.array(["b", "a", "c", "a", "c"], dtype=object), + np.array(["b", "a", "c", "a", "c"]), + ], + ids=["int64", "object", "str"], +) +def test_label_encoder_empty_array(values): + le = LabelEncoder() + le.fit(values) + # test empty transform + transformed = le.transform([]) + assert_array_equal(np.array([]), transformed) + # test empty inverse transform + inverse_transformed = le.inverse_transform([]) + assert_array_equal(np.array([]), inverse_transformed) + + +def test_sparse_output_multilabel_binarizer(): + # test input as iterable of iterables + inputs = [ + lambda: [(2, 3), (1,), (1, 2)], + lambda: ({2, 3}, {1}, {1, 2}), + lambda: iter([iter((2, 3)), iter((1,)), {1, 2}]), + ] + indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) + + inverse = inputs[0]() + for sparse_output in [True, False]: + for inp in inputs: + # With fit_transform + mlb = MultiLabelBinarizer(sparse_output=sparse_output) + got = mlb.fit_transform(inp()) + assert issparse(got) == 
sparse_output + if sparse_output: + # verify CSR assumption that indices and indptr have same dtype + assert got.indices.dtype == got.indptr.dtype + got = got.toarray() + assert_array_equal(indicator_mat, got) + assert_array_equal([1, 2, 3], mlb.classes_) + assert mlb.inverse_transform(got) == inverse + + # With fit + mlb = MultiLabelBinarizer(sparse_output=sparse_output) + got = mlb.fit(inp()).transform(inp()) + assert issparse(got) == sparse_output + if sparse_output: + # verify CSR assumption that indices and indptr have same dtype + assert got.indices.dtype == got.indptr.dtype + got = got.toarray() + assert_array_equal(indicator_mat, got) + assert_array_equal([1, 2, 3], mlb.classes_) + assert mlb.inverse_transform(got) == inverse + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_output_multilabel_binarizer_errors(csr_container): + inp = iter([iter((2, 3)), iter((1,)), {1, 2}]) + mlb = MultiLabelBinarizer(sparse_output=False) + mlb.fit(inp) + with pytest.raises(ValueError): + mlb.inverse_transform( + csr_container(np.array([[0, 1, 1], [2, 0, 0], [1, 1, 0]])) + ) + + +def test_multilabel_binarizer(): + # test input as iterable of iterables + inputs = [ + lambda: [(2, 3), (1,), (1, 2)], + lambda: ({2, 3}, {1}, {1, 2}), + lambda: iter([iter((2, 3)), iter((1,)), {1, 2}]), + ] + indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) + inverse = inputs[0]() + for inp in inputs: + # With fit_transform + mlb = MultiLabelBinarizer() + got = mlb.fit_transform(inp()) + assert_array_equal(indicator_mat, got) + assert_array_equal([1, 2, 3], mlb.classes_) + assert mlb.inverse_transform(got) == inverse + + # With fit + mlb = MultiLabelBinarizer() + got = mlb.fit(inp()).transform(inp()) + assert_array_equal(indicator_mat, got) + assert_array_equal([1, 2, 3], mlb.classes_) + assert mlb.inverse_transform(got) == inverse + + +def test_multilabel_binarizer_empty_sample(): + mlb = MultiLabelBinarizer() + y = [[1, 2], [1], []] + Y = np.array([[1, 1], [1, 0], [0, 0]]) + assert_array_equal(mlb.fit_transform(y), Y) + + +def test_multilabel_binarizer_unknown_class(): + mlb = MultiLabelBinarizer() + y = [[1, 2]] + Y = np.array([[1, 0], [0, 1]]) + warning_message = "unknown class.* will be ignored" + with pytest.warns(UserWarning, match=warning_message): + matrix = mlb.fit(y).transform([[4, 1], [2, 0]]) + + Y = np.array([[1, 0, 0], [0, 1, 0]]) + mlb = MultiLabelBinarizer(classes=[1, 2, 3]) + with pytest.warns(UserWarning, match=warning_message): + matrix = mlb.fit(y).transform([[4, 1], [2, 0]]) + assert_array_equal(matrix, Y) + + +def test_multilabel_binarizer_given_classes(): + inp = [(2, 3), (1,), (1, 2)] + indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 1]]) + # fit_transform() + mlb = MultiLabelBinarizer(classes=[1, 3, 2]) + assert_array_equal(mlb.fit_transform(inp), indicator_mat) + assert_array_equal(mlb.classes_, [1, 3, 2]) + + # fit().transform() + mlb = MultiLabelBinarizer(classes=[1, 3, 2]) + assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) + assert_array_equal(mlb.classes_, [1, 3, 2]) + + # ensure works with extra class + mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2]) + assert_array_equal( + mlb.fit_transform(inp), np.hstack(([[0], [0], [0]], indicator_mat)) + ) + assert_array_equal(mlb.classes_, [4, 1, 3, 2]) + + # ensure fit is no-op as iterable is not consumed + inp = iter(inp) + mlb = MultiLabelBinarizer(classes=[1, 3, 2]) + assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) + + # ensure a ValueError is thrown if given duplicate 
classes + err_msg = ( + "The classes argument contains duplicate classes. Remove " + "these duplicates before passing them to MultiLabelBinarizer." + ) + mlb = MultiLabelBinarizer(classes=[1, 3, 2, 3]) + with pytest.raises(ValueError, match=err_msg): + mlb.fit(inp) + + +def test_multilabel_binarizer_multiple_calls(): + inp = [(2, 3), (1,), (1, 2)] + indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 1]]) + + indicator_mat2 = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) + + # first call + mlb = MultiLabelBinarizer(classes=[1, 3, 2]) + assert_array_equal(mlb.fit_transform(inp), indicator_mat) + # second call change class + mlb.classes = [1, 2, 3] + assert_array_equal(mlb.fit_transform(inp), indicator_mat2) + + +def test_multilabel_binarizer_same_length_sequence(): + # Ensure sequences of the same length are not interpreted as a 2-d array + inp = [[1], [0], [2]] + indicator_mat = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) + # fit_transform() + mlb = MultiLabelBinarizer() + assert_array_equal(mlb.fit_transform(inp), indicator_mat) + assert_array_equal(mlb.inverse_transform(indicator_mat), inp) + + # fit().transform() + mlb = MultiLabelBinarizer() + assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) + assert_array_equal(mlb.inverse_transform(indicator_mat), inp) + + +def test_multilabel_binarizer_non_integer_labels(): + tuple_classes = _to_object_array([(1,), (2,), (3,)]) + inputs = [ + ([("2", "3"), ("1",), ("1", "2")], ["1", "2", "3"]), + ([("b", "c"), ("a",), ("a", "b")], ["a", "b", "c"]), + ([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes), + ] + indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) + for inp, classes in inputs: + # fit_transform() + mlb = MultiLabelBinarizer() + inp = np.array(inp, dtype=object) + assert_array_equal(mlb.fit_transform(inp), indicator_mat) + assert_array_equal(mlb.classes_, classes) + indicator_mat_inv = np.array(mlb.inverse_transform(indicator_mat), dtype=object) + assert_array_equal(indicator_mat_inv, inp) + + # fit().transform() + mlb = MultiLabelBinarizer() + assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) + assert_array_equal(mlb.classes_, classes) + indicator_mat_inv = np.array(mlb.inverse_transform(indicator_mat), dtype=object) + assert_array_equal(indicator_mat_inv, inp) + + mlb = MultiLabelBinarizer() + with pytest.raises(TypeError): + mlb.fit_transform([({}), ({}, {"a": "b"})]) + + +def test_multilabel_binarizer_non_unique(): + inp = [(1, 1, 1, 0)] + indicator_mat = np.array([[1, 1]]) + mlb = MultiLabelBinarizer() + assert_array_equal(mlb.fit_transform(inp), indicator_mat) + + +def test_multilabel_binarizer_inverse_validation(): + inp = [(1, 1, 1, 0)] + mlb = MultiLabelBinarizer() + mlb.fit_transform(inp) + # Not binary + with pytest.raises(ValueError): + mlb.inverse_transform(np.array([[1, 3]])) + # The following binary cases are fine, however + mlb.inverse_transform(np.array([[0, 0]])) + mlb.inverse_transform(np.array([[1, 1]])) + mlb.inverse_transform(np.array([[1, 0]])) + + # Wrong shape + with pytest.raises(ValueError): + mlb.inverse_transform(np.array([[1]])) + with pytest.raises(ValueError): + mlb.inverse_transform(np.array([[1, 1, 1]])) + + +def test_label_binarize_with_class_order(): + out = label_binarize([1, 6], classes=[1, 2, 4, 6]) + expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]]) + assert_array_equal(out, expected) + + # Modified class order + out = label_binarize([1, 6], classes=[1, 6, 4, 2]) + expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]]) + assert_array_equal(out, expected) + + out = 
label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1]) + expected = np.array([[0, 0, 1, 0], [0, 0, 0, 1], [0, 1, 0, 0], [1, 0, 0, 0]]) + assert_array_equal(out, expected) + + +def check_binarized_results(y, classes, pos_label, neg_label, expected): + for sparse_output in [True, False]: + if (pos_label == 0 or neg_label != 0) and sparse_output: + with pytest.raises(ValueError): + label_binarize( + y, + classes=classes, + neg_label=neg_label, + pos_label=pos_label, + sparse_output=sparse_output, + ) + continue + + # check label_binarize + binarized = label_binarize( + y, + classes=classes, + neg_label=neg_label, + pos_label=pos_label, + sparse_output=sparse_output, + ) + assert_array_equal(toarray(binarized), expected) + assert issparse(binarized) == sparse_output + + # check inverse + y_type = type_of_target(y) + if y_type == "multiclass": + inversed = _inverse_binarize_multiclass(binarized, classes=classes) + + else: + inversed = _inverse_binarize_thresholding( + binarized, + output_type=y_type, + classes=classes, + threshold=((neg_label + pos_label) / 2.0), + ) + + assert_array_equal(toarray(inversed), toarray(y)) + + # Check label binarizer + lb = LabelBinarizer( + neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output + ) + binarized = lb.fit_transform(y) + assert_array_equal(toarray(binarized), expected) + assert issparse(binarized) == sparse_output + inverse_output = lb.inverse_transform(binarized) + assert_array_equal(toarray(inverse_output), toarray(y)) + assert issparse(inverse_output) == issparse(y) + + +def test_label_binarize_binary(): + y = [0, 1, 0] + classes = [0, 1] + pos_label = 2 + neg_label = -1 + expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1)) + + check_binarized_results(y, classes, pos_label, neg_label, expected) + + # Binary case where sparse_output = True will not result in a ValueError + y = [0, 1, 0] + classes = [0, 1] + pos_label = 3 + neg_label = 0 + expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1)) + + check_binarized_results(y, classes, pos_label, neg_label, expected) + + +def test_label_binarize_multiclass(): + y = [0, 1, 2] + classes = [0, 1, 2] + pos_label = 2 + neg_label = 0 + expected = 2 * np.eye(3) + + check_binarized_results(y, classes, pos_label, neg_label, expected) + + with pytest.raises(ValueError): + label_binarize( + y, classes=classes, neg_label=-1, pos_label=pos_label, sparse_output=True + ) + + +@pytest.mark.parametrize( + "arr_type", + [np.array] + + COO_CONTAINERS + + CSC_CONTAINERS + + CSR_CONTAINERS + + DOK_CONTAINERS + + LIL_CONTAINERS, +) +def test_label_binarize_multilabel(arr_type): + y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]]) + classes = [0, 1, 2] + pos_label = 2 + neg_label = 0 + expected = pos_label * y_ind + y = arr_type(y_ind) + + check_binarized_results(y, classes, pos_label, neg_label, expected) + + with pytest.raises(ValueError): + label_binarize( + y, classes=classes, neg_label=-1, pos_label=pos_label, sparse_output=True + ) + + +def test_invalid_input_label_binarize(): + with pytest.raises(ValueError): + label_binarize([0, 2], classes=[0, 2], pos_label=0, neg_label=1) + with pytest.raises(ValueError, match="continuous target data is not "): + label_binarize([1.2, 2.7], classes=[0, 1]) + with pytest.raises(ValueError, match="mismatch with the labels"): + label_binarize([[1, 3]], classes=[1, 2, 3]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_inverse_binarize_multiclass(csr_container): + got = _inverse_binarize_multiclass( + 
csr_container([[0, 1, 0], [-1, 0, -1], [0, 0, 0]]), np.arange(3) + ) + assert_array_equal(got, np.array([1, 1, 0])) + + +def test_nan_label_encoder(): + """Check that label encoder encodes nans in transform. + + Non-regression test for #22628. + """ + le = LabelEncoder() + le.fit(["a", "a", "b", np.nan]) + + y_trans = le.transform([np.nan]) + assert_array_equal(y_trans, [2]) + + +@pytest.mark.parametrize( + "encoder", [LabelEncoder(), LabelBinarizer(), MultiLabelBinarizer()] +) +def test_label_encoders_do_not_have_set_output(encoder): + """Check that label encoders do not define set_output and work with y as a kwarg. + + Non-regression test for #26854. + """ + assert not hasattr(encoder, "set_output") + y_encoded_with_kwarg = encoder.fit_transform(y=["a", "b", "c"]) + y_encoded_positional = encoder.fit_transform(["a", "b", "c"]) + assert_array_equal(y_encoded_with_kwarg, y_encoded_positional) diff --git a/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_target_encoder.py b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_target_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..81b0f32d04d685883f8b2cad08e7df02bcc77edd --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_target_encoder.py @@ -0,0 +1,716 @@ +import re + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_equal + +from sklearn.ensemble import RandomForestRegressor +from sklearn.linear_model import Ridge +from sklearn.model_selection import ( + KFold, + ShuffleSplit, + StratifiedKFold, + cross_val_score, + train_test_split, +) +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import ( + KBinsDiscretizer, + LabelBinarizer, + LabelEncoder, + TargetEncoder, +) + + +def _encode_target(X_ordinal, y_numeric, n_categories, smooth): + """Simple Python implementation of target encoding.""" + cur_encodings = np.zeros(n_categories, dtype=np.float64) + y_mean = np.mean(y_numeric) + + if smooth == "auto": + y_variance = np.var(y_numeric) + for c in range(n_categories): + y_subset = y_numeric[X_ordinal == c] + n_i = y_subset.shape[0] + + if n_i == 0: + cur_encodings[c] = y_mean + continue + + y_subset_variance = np.var(y_subset) + m = y_subset_variance / y_variance + lambda_ = n_i / (n_i + m) + + cur_encodings[c] = lambda_ * np.mean(y_subset) + (1 - lambda_) * y_mean + return cur_encodings + else: # float + for c in range(n_categories): + y_subset = y_numeric[X_ordinal == c] + current_sum = np.sum(y_subset) + y_mean * smooth + current_cnt = y_subset.shape[0] + smooth + cur_encodings[c] = current_sum / current_cnt + return cur_encodings + + +@pytest.mark.parametrize( + "categories, unknown_value", + [ + ([np.array([0, 1, 2], dtype=np.int64)], 4), + ([np.array([1.0, 3.0, np.nan], dtype=np.float64)], 6.0), + ([np.array(["cat", "dog", "snake"], dtype=object)], "bear"), + ("auto", 3), + ], +) +@pytest.mark.parametrize("smooth", [5.0, "auto"]) +@pytest.mark.parametrize("target_type", ["binary", "continuous"]) +def test_encoding(categories, unknown_value, global_random_seed, smooth, target_type): + """Check encoding for binary and continuous targets. + + Compare the values returned by `TargetEncoder.fit_transform` against the + expected encodings for cv splits from a naive reference Python + implementation in _encode_target. 
+ """ + + n_categories = 3 + X_train_int_array = np.array([[0] * 20 + [1] * 30 + [2] * 40], dtype=np.int64).T + X_test_int_array = np.array([[0, 1, 2]], dtype=np.int64).T + n_samples = X_train_int_array.shape[0] + + if categories == "auto": + X_train = X_train_int_array + X_test = X_test_int_array + else: + X_train = categories[0][X_train_int_array] + X_test = categories[0][X_test_int_array] + + X_test = np.concatenate((X_test, [[unknown_value]])) + + data_rng = np.random.RandomState(global_random_seed) + n_splits = 3 + if target_type == "binary": + y_numeric = data_rng.randint(low=0, high=2, size=n_samples) + target_names = np.array(["cat", "dog"], dtype=object) + y_train = target_names[y_numeric] + + else: + assert target_type == "continuous" + y_numeric = data_rng.uniform(low=-10, high=20, size=n_samples) + y_train = y_numeric + + shuffled_idx = data_rng.permutation(n_samples) + X_train_int_array = X_train_int_array[shuffled_idx] + X_train = X_train[shuffled_idx] + y_train = y_train[shuffled_idx] + y_numeric = y_numeric[shuffled_idx] + + # Define our CV splitting strategy + if target_type == "binary": + cv = StratifiedKFold( + n_splits=n_splits, random_state=global_random_seed, shuffle=True + ) + else: + cv = KFold(n_splits=n_splits, random_state=global_random_seed, shuffle=True) + + # Compute the expected values using our reference Python implementation of + # target encoding: + expected_X_fit_transform = np.empty_like(X_train_int_array, dtype=np.float64) + + for train_idx, test_idx in cv.split(X_train_int_array, y_train): + X_, y_ = X_train_int_array[train_idx, 0], y_numeric[train_idx] + cur_encodings = _encode_target(X_, y_, n_categories, smooth) + expected_X_fit_transform[test_idx, 0] = cur_encodings[ + X_train_int_array[test_idx, 0] + ] + + # Check that we can obtain the same encodings by calling `fit_transform` on + # the estimator with the same CV parameters: + target_encoder = TargetEncoder( + smooth=smooth, + categories=categories, + cv=n_splits, + random_state=global_random_seed, + ) + + X_fit_transform = target_encoder.fit_transform(X_train, y_train) + + assert target_encoder.target_type_ == target_type + assert_allclose(X_fit_transform, expected_X_fit_transform) + assert len(target_encoder.encodings_) == 1 + if target_type == "binary": + assert_array_equal(target_encoder.classes_, target_names) + else: + assert target_encoder.classes_ is None + + # compute encodings for all data to validate `transform` + y_mean = np.mean(y_numeric) + expected_encodings = _encode_target( + X_train_int_array[:, 0], y_numeric, n_categories, smooth + ) + assert_allclose(target_encoder.encodings_[0], expected_encodings) + assert target_encoder.target_mean_ == pytest.approx(y_mean) + + # Transform on test data, the last value is unknown so it is encoded as the target + # mean + expected_X_test_transform = np.concatenate( + (expected_encodings, np.array([y_mean])) + ).reshape(-1, 1) + + X_test_transform = target_encoder.transform(X_test) + assert_allclose(X_test_transform, expected_X_test_transform) + + +@pytest.mark.parametrize( + "categories, unknown_values", + [ + ([np.array([0, 1, 2], dtype=np.int64)], "auto"), + ([np.array(["cat", "dog", "snake"], dtype=object)], ["bear", "rabbit"]), + ], +) +@pytest.mark.parametrize( + "target_labels", [np.array([1, 2, 3]), np.array(["a", "b", "c"])] +) +@pytest.mark.parametrize("smooth", [5.0, "auto"]) +def test_encoding_multiclass( + global_random_seed, categories, unknown_values, target_labels, smooth +): + """Check encoding for multiclass targets.""" + 
rng = np.random.RandomState(global_random_seed) + + n_samples = 80 + n_features = 2 + feat_1_int = np.array(rng.randint(low=0, high=2, size=n_samples)) + feat_2_int = np.array(rng.randint(low=0, high=3, size=n_samples)) + feat_1 = categories[0][feat_1_int] + feat_2 = categories[0][feat_2_int] + X_train = np.column_stack((feat_1, feat_2)) + X_train_int = np.column_stack((feat_1_int, feat_2_int)) + categories_ = [[0, 1], [0, 1, 2]] + + n_classes = 3 + y_train_int = np.array(rng.randint(low=0, high=n_classes, size=n_samples)) + y_train = target_labels[y_train_int] + y_train_enc = LabelBinarizer().fit_transform(y_train) + + n_splits = 3 + cv = StratifiedKFold( + n_splits=n_splits, random_state=global_random_seed, shuffle=True + ) + + # Manually compute encodings for cv splits to validate `fit_transform` + expected_X_fit_transform = np.empty( + (X_train_int.shape[0], X_train_int.shape[1] * n_classes), + dtype=np.float64, + ) + for f_idx, cats in enumerate(categories_): + for c_idx in range(n_classes): + for train_idx, test_idx in cv.split(X_train, y_train): + y_class = y_train_enc[:, c_idx] + X_, y_ = X_train_int[train_idx, f_idx], y_class[train_idx] + current_encoding = _encode_target(X_, y_, len(cats), smooth) + # f_idx: 0, 0, 0, 1, 1, 1 + # c_idx: 0, 1, 2, 0, 1, 2 + # exp_idx: 0, 1, 2, 3, 4, 5 + exp_idx = c_idx + (f_idx * n_classes) + expected_X_fit_transform[test_idx, exp_idx] = current_encoding[ + X_train_int[test_idx, f_idx] + ] + + target_encoder = TargetEncoder( + smooth=smooth, + cv=n_splits, + random_state=global_random_seed, + ) + X_fit_transform = target_encoder.fit_transform(X_train, y_train) + + assert target_encoder.target_type_ == "multiclass" + assert_allclose(X_fit_transform, expected_X_fit_transform) + + # Manually compute encoding to validate `transform` + expected_encodings = [] + for f_idx, cats in enumerate(categories_): + for c_idx in range(n_classes): + y_class = y_train_enc[:, c_idx] + current_encoding = _encode_target( + X_train_int[:, f_idx], y_class, len(cats), smooth + ) + expected_encodings.append(current_encoding) + + assert len(target_encoder.encodings_) == n_features * n_classes + for i in range(n_features * n_classes): + assert_allclose(target_encoder.encodings_[i], expected_encodings[i]) + assert_array_equal(target_encoder.classes_, target_labels) + + # Include unknown values at the end + X_test_int = np.array([[0, 1], [1, 2], [4, 5]]) + if unknown_values == "auto": + X_test = X_test_int + else: + X_test = np.empty_like(X_test_int[:-1, :], dtype=object) + for column_idx in range(X_test_int.shape[1]): + X_test[:, column_idx] = categories[0][X_test_int[:-1, column_idx]] + # Add unknown values at end + X_test = np.vstack((X_test, unknown_values)) + + y_mean = np.mean(y_train_enc, axis=0) + expected_X_test_transform = np.empty( + (X_test_int.shape[0], X_test_int.shape[1] * n_classes), + dtype=np.float64, + ) + n_rows = X_test_int.shape[0] + f_idx = [0, 0, 0, 1, 1, 1] + # Last row are unknowns, dealt with later + for row_idx in range(n_rows - 1): + for i, enc in enumerate(expected_encodings): + expected_X_test_transform[row_idx, i] = enc[X_test_int[row_idx, f_idx[i]]] + + # Unknowns encoded as target mean for each class + # `y_mean` contains target mean for each class, thus cycle through mean of + # each class, `n_features` times + mean_idx = [0, 1, 2, 0, 1, 2] + for i in range(n_classes * n_features): + expected_X_test_transform[n_rows - 1, i] = y_mean[mean_idx[i]] + + X_test_transform = target_encoder.transform(X_test) + assert_allclose(X_test_transform, 
expected_X_test_transform) + + +@pytest.mark.parametrize( + "X, categories", + [ + ( + np.array([[0] * 10 + [1] * 10 + [3]], dtype=np.int64).T, # 3 is unknown + [[0, 1, 2]], + ), + ( + np.array( + [["cat"] * 10 + ["dog"] * 10 + ["snake"]], dtype=object + ).T, # snake is unknown + [["dog", "cat", "cow"]], + ), + ], +) +@pytest.mark.parametrize("smooth", [4.0, "auto"]) +def test_custom_categories(X, categories, smooth): + """Custom categories with unknown categories that are not in training data.""" + rng = np.random.RandomState(0) + y = rng.uniform(low=-10, high=20, size=X.shape[0]) + enc = TargetEncoder(categories=categories, smooth=smooth, random_state=0).fit(X, y) + + # The last element is unknown and encoded as the mean + y_mean = y.mean() + X_trans = enc.transform(X[-1:]) + assert X_trans[0, 0] == pytest.approx(y_mean) + + assert len(enc.encodings_) == 1 + # custom category that is not in training data + assert enc.encodings_[0][-1] == pytest.approx(y_mean) + + +@pytest.mark.parametrize( + "y, msg", + [ + ([1, 2, 0, 1], "Found input variables with inconsistent"), + ( + np.array([[1, 2, 0], [1, 2, 3]]).T, + "Target type was inferred to be 'multiclass-multioutput'", + ), + ], +) +def test_errors(y, msg): + """Check invalid input.""" + X = np.array([[1, 0, 1]]).T + + enc = TargetEncoder() + with pytest.raises(ValueError, match=msg): + enc.fit_transform(X, y) + + +def test_use_regression_target(): + """Check inferred and specified `target_type` on regression target.""" + X = np.array([[0, 1, 0, 1, 0, 1]]).T + y = np.array([1.0, 2.0, 3.0, 2.0, 3.0, 4.0]) + + enc = TargetEncoder(cv=2) + with pytest.warns( + UserWarning, + match=re.escape( + "The least populated class in y has only 1 members, which is less than" + " n_splits=2." + ), + ): + enc.fit_transform(X, y) + assert enc.target_type_ == "multiclass" + + enc = TargetEncoder(cv=2, target_type="continuous") + enc.fit_transform(X, y) + assert enc.target_type_ == "continuous" + + +@pytest.mark.parametrize( + "y, feature_names", + [ + ([1, 2] * 10, ["A", "B"]), + ([1, 2, 3] * 6 + [1, 2], ["A_1", "A_2", "A_3", "B_1", "B_2", "B_3"]), + ( + ["y1", "y2", "y3"] * 6 + ["y1", "y2"], + ["A_y1", "A_y2", "A_y3", "B_y1", "B_y2", "B_y3"], + ), + ], +) +def test_feature_names_out_set_output(y, feature_names): + """Check TargetEncoder works with set_output.""" + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"A": ["a", "b"] * 10, "B": [1, 2] * 10}) + + enc_default = TargetEncoder(cv=2, smooth=3.0, random_state=0) + enc_default.set_output(transform="default") + enc_pandas = TargetEncoder(cv=2, smooth=3.0, random_state=0) + enc_pandas.set_output(transform="pandas") + + X_default = enc_default.fit_transform(X_df, y) + X_pandas = enc_pandas.fit_transform(X_df, y) + + assert_allclose(X_pandas.to_numpy(), X_default) + assert_array_equal(enc_pandas.get_feature_names_out(), feature_names) + assert_array_equal(enc_pandas.get_feature_names_out(), X_pandas.columns) + + +@pytest.mark.parametrize("to_pandas", [True, False]) +@pytest.mark.parametrize("smooth", [1.0, "auto"]) +@pytest.mark.parametrize("target_type", ["binary-ints", "binary-str", "continuous"]) +def test_multiple_features_quick(to_pandas, smooth, target_type): + """Check target encoder with multiple features.""" + X_ordinal = np.array( + [[1, 1], [0, 1], [1, 1], [2, 1], [1, 0], [0, 1], [1, 0], [0, 0]], dtype=np.int64 + ) + if target_type == "binary-str": + y_train = np.array(["a", "b", "a", "a", "b", "b", "a", "b"]) + y_integer = LabelEncoder().fit_transform(y_train) + cv =
StratifiedKFold(2, random_state=0, shuffle=True) + elif target_type == "binary-ints": + y_train = np.array([3, 4, 3, 3, 3, 4, 4, 4]) + y_integer = LabelEncoder().fit_transform(y_train) + cv = StratifiedKFold(2, random_state=0, shuffle=True) + else: + y_train = np.array([3.0, 5.1, 2.4, 3.5, 4.1, 5.5, 10.3, 7.3], dtype=np.float32) + y_integer = y_train + cv = KFold(2, random_state=0, shuffle=True) + y_mean = np.mean(y_integer) + categories = [[0, 1, 2], [0, 1]] + + X_test = np.array( + [ + [0, 1], + [3, 0], # 3 is unknown + [1, 10], # 10 is unknown + ], + dtype=np.int64, + ) + + if to_pandas: + pd = pytest.importorskip("pandas") + # convert second feature to an object + X_train = pd.DataFrame( + { + "feat0": X_ordinal[:, 0], + "feat1": np.array(["cat", "dog"], dtype=object)[X_ordinal[:, 1]], + } + ) + # "snake" is unknown + X_test = pd.DataFrame({"feat0": X_test[:, 0], "feat1": ["dog", "cat", "snake"]}) + else: + X_train = X_ordinal + + # manually compute encoding for fit_transform + expected_X_fit_transform = np.empty_like(X_ordinal, dtype=np.float64) + for f_idx, cats in enumerate(categories): + for train_idx, test_idx in cv.split(X_ordinal, y_integer): + X_, y_ = X_ordinal[train_idx, f_idx], y_integer[train_idx] + current_encoding = _encode_target(X_, y_, len(cats), smooth) + expected_X_fit_transform[test_idx, f_idx] = current_encoding[ + X_ordinal[test_idx, f_idx] + ] + + # manually compute encoding for transform + expected_encodings = [] + for f_idx, cats in enumerate(categories): + current_encoding = _encode_target( + X_ordinal[:, f_idx], y_integer, len(cats), smooth + ) + expected_encodings.append(current_encoding) + + expected_X_test_transform = np.array( + [ + [expected_encodings[0][0], expected_encodings[1][1]], + [y_mean, expected_encodings[1][0]], + [expected_encodings[0][1], y_mean], + ], + dtype=np.float64, + ) + + enc = TargetEncoder(smooth=smooth, cv=2, random_state=0) + X_fit_transform = enc.fit_transform(X_train, y_train) + assert_allclose(X_fit_transform, expected_X_fit_transform) + + assert len(enc.encodings_) == 2 + for i in range(2): + assert_allclose(enc.encodings_[i], expected_encodings[i]) + + X_test_transform = enc.transform(X_test) + assert_allclose(X_test_transform, expected_X_test_transform) + + +@pytest.mark.parametrize( + "y, y_mean", + [ + (np.array([3.4] * 20), 3.4), + (np.array([0] * 20), 0), + (np.array(["a"] * 20, dtype=object), 0), + ], + ids=["continuous", "binary", "binary-string"], +) +@pytest.mark.parametrize("smooth", ["auto", 4.0, 0.0]) +def test_constant_target_and_feature(y, y_mean, smooth): + """Check edge case where feature and target is constant.""" + X = np.array([[1] * 20]).T + n_samples = X.shape[0] + + enc = TargetEncoder(cv=2, smooth=smooth, random_state=0) + X_trans = enc.fit_transform(X, y) + assert_allclose(X_trans, np.repeat([[y_mean]], n_samples, axis=0)) + assert enc.encodings_[0][0] == pytest.approx(y_mean) + assert enc.target_mean_ == pytest.approx(y_mean) + + X_test = np.array([[1], [0]]) + X_test_trans = enc.transform(X_test) + assert_allclose(X_test_trans, np.repeat([[y_mean]], 2, axis=0)) + + +def test_fit_transform_not_associated_with_y_if_ordinal_categorical_is_not( + global_random_seed, +): + cardinality = 30 # not too large, otherwise we need a very large n_samples + n_samples = 3000 + rng = np.random.RandomState(global_random_seed) + y_train = rng.normal(size=n_samples) + X_train = rng.randint(0, cardinality, size=n_samples).reshape(-1, 1) + + # Sort by y_train to attempt to cause a leak + y_sorted_indices = 
y_train.argsort() + y_train = y_train[y_sorted_indices] + X_train = X_train[y_sorted_indices] + + target_encoder = TargetEncoder(shuffle=True, random_state=global_random_seed) + X_encoded_train_shuffled = target_encoder.fit_transform(X_train, y_train) + + target_encoder = TargetEncoder(shuffle=False) + X_encoded_train_no_shuffled = target_encoder.fit_transform(X_train, y_train) + + # Check that no information about y_train has leaked into X_train: + regressor = RandomForestRegressor( + n_estimators=10, min_samples_leaf=20, random_state=global_random_seed + ) + + # It's impossible to learn a good predictive model on the training set when + # using the original representation X_train or the target encoded + # representation with shuffled inner CV. For the latter, no information + # about y_train has inadvertently leaked into the prior used to generate + # `X_encoded_train_shuffled`: + cv = ShuffleSplit(n_splits=50, random_state=global_random_seed) + assert cross_val_score(regressor, X_train, y_train, cv=cv).mean() < 0.1 + assert ( + cross_val_score(regressor, X_encoded_train_shuffled, y_train, cv=cv).mean() + < 0.1 + ) + + # Without the inner CV shuffling, a lot of information about y_train goes into + # the per-fold y_train.mean() priors: shrinkage is no longer effective in this + # case and would no longer be able to prevent downstream over-fitting. + assert ( + cross_val_score(regressor, X_encoded_train_no_shuffled, y_train, cv=cv).mean() + > 0.5 + ) + + +def test_smooth_zero(): + """Check edge case with zero smoothing and a cv split that does not contain a category.""" + X = np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1]]).T + y = np.array([2.1, 4.3, 1.2, 3.1, 1.0, 9.0, 10.3, 14.2, 13.3, 15.0]) + + enc = TargetEncoder(smooth=0.0, shuffle=False, cv=2) + X_trans = enc.fit_transform(X, y) + + # With cv = 2, category 0 does not exist in the second half, thus + # it will be encoded as the mean of the second half + assert_allclose(X_trans[0], np.mean(y[5:])) + + # category 1 does not exist in the first half, thus it will be encoded as + # the mean of the first half + assert_allclose(X_trans[-1], np.mean(y[:5])) + + +@pytest.mark.parametrize("smooth", [0.0, 1e3, "auto"]) +def test_invariance_of_encoding_under_label_permutation(smooth, global_random_seed): + # Check that the encoding does not depend on the integer values used to + # represent the labels. This is quite a trivial property but it is helpful + # to understand the following test. + rng = np.random.RandomState(global_random_seed) + + # Random y and informative categorical X to make the test non-trivial when + # using smoothing.
+ y = rng.normal(size=1000) + n_categories = 30 + X = KBinsDiscretizer(n_bins=n_categories, encode="ordinal").fit_transform( + y.reshape(-1, 1) + ) + + X_train, X_test, y_train, y_test = train_test_split( + X, y, random_state=global_random_seed + ) + + # Shuffle the labels to make sure that the encoding is invariant to the + # permutation of the labels + permutated_labels = rng.permutation(n_categories) + X_train_permuted = permutated_labels[X_train.astype(np.int32)] + X_test_permuted = permutated_labels[X_test.astype(np.int32)] + + target_encoder = TargetEncoder(smooth=smooth, random_state=global_random_seed) + X_train_encoded = target_encoder.fit_transform(X_train, y_train) + X_test_encoded = target_encoder.transform(X_test) + + X_train_permuted_encoded = target_encoder.fit_transform(X_train_permuted, y_train) + X_test_permuted_encoded = target_encoder.transform(X_test_permuted) + + assert_allclose(X_train_encoded, X_train_permuted_encoded) + assert_allclose(X_test_encoded, X_test_permuted_encoded) + + +# TODO(1.5) remove warning filter when kbd's subsample default is changed +@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000") +@pytest.mark.parametrize("smooth", [0.0, "auto"]) +def test_target_encoding_for_linear_regression(smooth, global_random_seed): + # Check some expected statistical properties when fitting a linear + # regression model on target encoded features depending on their relation + # with that target. + + # In this test, we use the Ridge class with the "lsqr" solver and a little + # bit of regularization to implement a linear regression model that + # converges quickly for large `n_samples` and robustly in case of + # correlated features. Since we will fit this model on a mean centered + # target, we do not need to fit an intercept and this will help simplify + # the analysis with respect to the expected coefficients. + linear_regression = Ridge(alpha=1e-6, solver="lsqr", fit_intercept=False) + + # Construct a random target variable. We need a large number of samples for + # this test to be stable across all values of the random seed. + n_samples = 50_000 + rng = np.random.RandomState(global_random_seed) + y = rng.randn(n_samples) + + # Generate a single informative ordinal feature with medium cardinality. + # Inject some irreducible noise to make it harder for a multivariate model + # to identify the informative feature from other pure noise features. + noise = 0.8 * rng.randn(n_samples) + n_categories = 100 + X_informative = KBinsDiscretizer( + n_bins=n_categories, + encode="ordinal", + strategy="uniform", + random_state=rng, + ).fit_transform((y + noise).reshape(-1, 1)) + + # Let's permute the labels to hide the fact that this feature is + # informative to naive linear regression model trained on the raw ordinal + # values. As highlighted in the previous test, the target encoding should be + # invariant to such a permutation. + permutated_labels = rng.permutation(n_categories) + X_informative = permutated_labels[X_informative.astype(np.int32)] + + # Generate a shuffled copy of the informative feature to destroy the + # relationship with the target. + X_shuffled = rng.permutation(X_informative) + + # Also include a very high cardinality categorical feature that is by + # itself independent of the target variable: target encoding such a feature + # without internal cross-validation should cause catastrophic overfitting + # for the downstream regressor, even with shrinkage. 
This kind of feature + # typically represents near-unique identifiers of samples. In general they + # should be removed from machine learning datasets but here we want to + # study the ability of the default behavior of TargetEncoder to mitigate + # them automatically. + X_near_unique_categories = rng.choice( + int(0.9 * n_samples), size=n_samples, replace=True + ).reshape(-1, 1) + + # Assemble the dataset and do a train-test split: + X = np.concatenate( + [X_informative, X_shuffled, X_near_unique_categories], + axis=1, + ) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + # Let's first check that a linear regression model trained on the raw + # features underfits because of the meaningless ordinal encoding of the + # labels. + raw_model = linear_regression.fit(X_train, y_train) + assert raw_model.score(X_train, y_train) < 0.1 + assert raw_model.score(X_test, y_test) < 0.1 + + # Now do the same with target encoding using the internal CV mechanism + # implemented when using fit_transform. + model_with_cv = make_pipeline( + TargetEncoder(smooth=smooth, random_state=rng), linear_regression + ).fit(X_train, y_train) + + # This model should be able to fit the data well and also generalize to the + # test data (assuming that the binning is fine-grained enough). The R2 + # scores are not perfect because of the noise injected during the + # generation of the unique informative feature. + coef = model_with_cv[-1].coef_ + assert model_with_cv.score(X_train, y_train) > 0.5, coef + assert model_with_cv.score(X_test, y_test) > 0.5, coef + + # The target encoder recovers the linear relationship with slope 1 between + # the target encoded unique informative predictor and the target. Since the + # target encoding of the 2 other features is not informative thanks to the + # use of internal cross-validation, the multivariate linear regressor + # assigns a coef of 1 to the first feature and 0 to the other 2. + assert coef[0] == pytest.approx(1, abs=1e-2) + assert (np.abs(coef[1:]) < 0.2).all() + + # Let's now disable the internal cross-validation by calling fit and then + # transform separately on the training set: + target_encoder = TargetEncoder(smooth=smooth, random_state=rng).fit( + X_train, y_train + ) + X_enc_no_cv_train = target_encoder.transform(X_train) + X_enc_no_cv_test = target_encoder.transform(X_test) + model_no_cv = linear_regression.fit(X_enc_no_cv_train, y_train) + + # The linear regression model should always overfit because it assigns + # too much weight to the extremely high cardinality feature relative to + # the informative feature. Note that this is the case even when using + # the empirical Bayes smoothing which is not enough to prevent such + # overfitting alone. + coef = model_no_cv.coef_ + assert model_no_cv.score(X_enc_no_cv_train, y_train) > 0.7, coef + assert model_no_cv.score(X_enc_no_cv_test, y_test) < 0.5, coef + + # The model overfits because it assigns too much weight to the high + # cardinality yet non-informative feature instead of the lower + # cardinality yet informative feature: + assert abs(coef[0]) < abs(coef[2]) + + +def test_pandas_copy_on_write(): + """ + Test target-encoder Cython code when y is read-only. + + The numpy array underlying df["y"] is read-only when copy-on-write is enabled. + Non-regression test for gh-27879.
+ """ + pd = pytest.importorskip("pandas", minversion="2.0") + with pd.option_context("mode.copy_on_write", True): + df = pd.DataFrame({"x": ["a", "b", "b"], "y": [4.0, 5.0, 6.0]}) + TargetEncoder(target_type="continuous").fit(df[["x"]], df["y"])