diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_birch.py b/venv/lib/python3.10/site-packages/sklearn/cluster/_birch.py new file mode 100644 index 0000000000000000000000000000000000000000..d62fb880ba8b2633dfb3ee70cc7ad4dc3203eeb3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/_birch.py @@ -0,0 +1,741 @@ +# Authors: Manoj Kumar +# Alexandre Gramfort +# Joel Nothman +# License: BSD 3 clause + +import warnings +from math import sqrt +from numbers import Integral, Real + +import numpy as np +from scipy import sparse + +from .._config import config_context +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + ClusterMixin, + TransformerMixin, + _fit_context, +) +from ..exceptions import ConvergenceWarning +from ..metrics import pairwise_distances_argmin +from ..metrics.pairwise import euclidean_distances +from ..utils._param_validation import Interval +from ..utils.extmath import row_norms +from ..utils.validation import check_is_fitted +from . import AgglomerativeClustering + + +def _iterate_sparse_X(X): + """This little hack returns a densified row when iterating over a sparse + matrix, instead of constructing a sparse matrix for every row that is + expensive. + """ + n_samples = X.shape[0] + X_indices = X.indices + X_data = X.data + X_indptr = X.indptr + + for i in range(n_samples): + row = np.zeros(X.shape[1]) + startptr, endptr = X_indptr[i], X_indptr[i + 1] + nonzero_indices = X_indices[startptr:endptr] + row[nonzero_indices] = X_data[startptr:endptr] + yield row + + +def _split_node(node, threshold, branching_factor): + """The node has to be split if there is no place for a new subcluster + in the node. + 1. Two empty nodes and two empty subclusters are initialized. + 2. The pair of distant subclusters are found. + 3. The properties of the empty subclusters and nodes are updated + according to the nearest distance between the subclusters to the + pair of distant subclusters. + 4. The two nodes are set as children to the two subclusters. + """ + new_subcluster1 = _CFSubcluster() + new_subcluster2 = _CFSubcluster() + new_node1 = _CFNode( + threshold=threshold, + branching_factor=branching_factor, + is_leaf=node.is_leaf, + n_features=node.n_features, + dtype=node.init_centroids_.dtype, + ) + new_node2 = _CFNode( + threshold=threshold, + branching_factor=branching_factor, + is_leaf=node.is_leaf, + n_features=node.n_features, + dtype=node.init_centroids_.dtype, + ) + new_subcluster1.child_ = new_node1 + new_subcluster2.child_ = new_node2 + + if node.is_leaf: + if node.prev_leaf_ is not None: + node.prev_leaf_.next_leaf_ = new_node1 + new_node1.prev_leaf_ = node.prev_leaf_ + new_node1.next_leaf_ = new_node2 + new_node2.prev_leaf_ = new_node1 + new_node2.next_leaf_ = node.next_leaf_ + if node.next_leaf_ is not None: + node.next_leaf_.prev_leaf_ = new_node2 + + dist = euclidean_distances( + node.centroids_, Y_norm_squared=node.squared_norm_, squared=True + ) + n_clusters = dist.shape[0] + + farthest_idx = np.unravel_index(dist.argmax(), (n_clusters, n_clusters)) + node1_dist, node2_dist = dist[(farthest_idx,)] + + node1_closer = node1_dist < node2_dist + # make sure node1 is closest to itself even if all distances are equal. + # This can only happen when all node.centroids_ are duplicates leading to all + # distances between centroids being zero. 
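+    # ``node1_closer`` is a boolean mask over the node's subclusters: entry i is
+    # True when subcluster i lies nearer to the first of the two farthest-apart
+    # seed subclusters. The redistribution loop below sends masked subclusters to
+    # new_node1/new_subcluster1 and the remaining ones to new_node2/new_subcluster2.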
+ node1_closer[farthest_idx[0]] = True + + for idx, subcluster in enumerate(node.subclusters_): + if node1_closer[idx]: + new_node1.append_subcluster(subcluster) + new_subcluster1.update(subcluster) + else: + new_node2.append_subcluster(subcluster) + new_subcluster2.update(subcluster) + return new_subcluster1, new_subcluster2 + + +class _CFNode: + """Each node in a CFTree is called a CFNode. + + The CFNode can have a maximum of branching_factor + number of CFSubclusters. + + Parameters + ---------- + threshold : float + Threshold needed for a new subcluster to enter a CFSubcluster. + + branching_factor : int + Maximum number of CF subclusters in each node. + + is_leaf : bool + We need to know if the CFNode is a leaf or not, in order to + retrieve the final subclusters. + + n_features : int + The number of features. + + Attributes + ---------- + subclusters_ : list + List of subclusters for a particular CFNode. + + prev_leaf_ : _CFNode + Useful only if is_leaf is True. + + next_leaf_ : _CFNode + next_leaf. Useful only if is_leaf is True. + the final subclusters. + + init_centroids_ : ndarray of shape (branching_factor + 1, n_features) + Manipulate ``init_centroids_`` throughout rather than centroids_ since + the centroids are just a view of the ``init_centroids_`` . + + init_sq_norm_ : ndarray of shape (branching_factor + 1,) + manipulate init_sq_norm_ throughout. similar to ``init_centroids_``. + + centroids_ : ndarray of shape (branching_factor + 1, n_features) + View of ``init_centroids_``. + + squared_norm_ : ndarray of shape (branching_factor + 1,) + View of ``init_sq_norm_``. + + """ + + def __init__(self, *, threshold, branching_factor, is_leaf, n_features, dtype): + self.threshold = threshold + self.branching_factor = branching_factor + self.is_leaf = is_leaf + self.n_features = n_features + + # The list of subclusters, centroids and squared norms + # to manipulate throughout. + self.subclusters_ = [] + self.init_centroids_ = np.zeros((branching_factor + 1, n_features), dtype=dtype) + self.init_sq_norm_ = np.zeros((branching_factor + 1), dtype) + self.squared_norm_ = [] + self.prev_leaf_ = None + self.next_leaf_ = None + + def append_subcluster(self, subcluster): + n_samples = len(self.subclusters_) + self.subclusters_.append(subcluster) + self.init_centroids_[n_samples] = subcluster.centroid_ + self.init_sq_norm_[n_samples] = subcluster.sq_norm_ + + # Keep centroids and squared norm as views. In this way + # if we change init_centroids and init_sq_norm_, it is + # sufficient, + self.centroids_ = self.init_centroids_[: n_samples + 1, :] + self.squared_norm_ = self.init_sq_norm_[: n_samples + 1] + + def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2): + """Remove a subcluster from a node and update it with the + split subclusters. + """ + ind = self.subclusters_.index(subcluster) + self.subclusters_[ind] = new_subcluster1 + self.init_centroids_[ind] = new_subcluster1.centroid_ + self.init_sq_norm_[ind] = new_subcluster1.sq_norm_ + self.append_subcluster(new_subcluster2) + + def insert_cf_subcluster(self, subcluster): + """Insert a new subcluster into the node.""" + if not self.subclusters_: + self.append_subcluster(subcluster) + return False + + threshold = self.threshold + branching_factor = self.branching_factor + # We need to find the closest subcluster among all the + # subclusters so that we can insert our new subcluster. 
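+        # ``dist_matrix`` below holds ||c_i||^2 - 2 <c_i, x> for every stored
+        # centroid c_i and the incoming subcluster centroid x. The omitted
+        # ||x||^2 term is identical for all subclusters, so np.argmin still
+        # returns the closest subcluster while skipping that extra computation.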
+ dist_matrix = np.dot(self.centroids_, subcluster.centroid_) + dist_matrix *= -2.0 + dist_matrix += self.squared_norm_ + closest_index = np.argmin(dist_matrix) + closest_subcluster = self.subclusters_[closest_index] + + # If the subcluster has a child, we need a recursive strategy. + if closest_subcluster.child_ is not None: + split_child = closest_subcluster.child_.insert_cf_subcluster(subcluster) + + if not split_child: + # If it is determined that the child need not be split, we + # can just update the closest_subcluster + closest_subcluster.update(subcluster) + self.init_centroids_[closest_index] = self.subclusters_[ + closest_index + ].centroid_ + self.init_sq_norm_[closest_index] = self.subclusters_[ + closest_index + ].sq_norm_ + return False + + # things not too good. we need to redistribute the subclusters in + # our child node, and add a new subcluster in the parent + # subcluster to accommodate the new child. + else: + new_subcluster1, new_subcluster2 = _split_node( + closest_subcluster.child_, + threshold, + branching_factor, + ) + self.update_split_subclusters( + closest_subcluster, new_subcluster1, new_subcluster2 + ) + + if len(self.subclusters_) > self.branching_factor: + return True + return False + + # good to go! + else: + merged = closest_subcluster.merge_subcluster(subcluster, self.threshold) + if merged: + self.init_centroids_[closest_index] = closest_subcluster.centroid_ + self.init_sq_norm_[closest_index] = closest_subcluster.sq_norm_ + return False + + # not close to any other subclusters, and we still + # have space, so add. + elif len(self.subclusters_) < self.branching_factor: + self.append_subcluster(subcluster) + return False + + # We do not have enough space nor is it closer to an + # other subcluster. We need to split. + else: + self.append_subcluster(subcluster) + return True + + +class _CFSubcluster: + """Each subcluster in a CFNode is called a CFSubcluster. + + A CFSubcluster can have a CFNode has its child. + + Parameters + ---------- + linear_sum : ndarray of shape (n_features,), default=None + Sample. This is kept optional to allow initialization of empty + subclusters. + + Attributes + ---------- + n_samples_ : int + Number of samples that belong to each subcluster. + + linear_sum_ : ndarray + Linear sum of all the samples in a subcluster. Prevents holding + all sample data in memory. + + squared_sum_ : float + Sum of the squared l2 norms of all samples belonging to a subcluster. + + centroid_ : ndarray of shape (branching_factor + 1, n_features) + Centroid of the subcluster. Prevent recomputing of centroids when + ``CFNode.centroids_`` is called. + + child_ : _CFNode + Child Node of the subcluster. Once a given _CFNode is set as the child + of the _CFNode, it is set to ``self.child_``. + + sq_norm_ : ndarray of shape (branching_factor + 1,) + Squared norm of the subcluster. Used to prevent recomputing when + pairwise minimum distances are computed. 
+ """ + + def __init__(self, *, linear_sum=None): + if linear_sum is None: + self.n_samples_ = 0 + self.squared_sum_ = 0.0 + self.centroid_ = self.linear_sum_ = 0 + else: + self.n_samples_ = 1 + self.centroid_ = self.linear_sum_ = linear_sum + self.squared_sum_ = self.sq_norm_ = np.dot( + self.linear_sum_, self.linear_sum_ + ) + self.child_ = None + + def update(self, subcluster): + self.n_samples_ += subcluster.n_samples_ + self.linear_sum_ += subcluster.linear_sum_ + self.squared_sum_ += subcluster.squared_sum_ + self.centroid_ = self.linear_sum_ / self.n_samples_ + self.sq_norm_ = np.dot(self.centroid_, self.centroid_) + + def merge_subcluster(self, nominee_cluster, threshold): + """Check if a cluster is worthy enough to be merged. If + yes then merge. + """ + new_ss = self.squared_sum_ + nominee_cluster.squared_sum_ + new_ls = self.linear_sum_ + nominee_cluster.linear_sum_ + new_n = self.n_samples_ + nominee_cluster.n_samples_ + new_centroid = (1 / new_n) * new_ls + new_sq_norm = np.dot(new_centroid, new_centroid) + + # The squared radius of the cluster is defined: + # r^2 = sum_i ||x_i - c||^2 / n + # with x_i the n points assigned to the cluster and c its centroid: + # c = sum_i x_i / n + # This can be expanded to: + # r^2 = sum_i ||x_i||^2 / n - 2 < sum_i x_i / n, c> + n ||c||^2 / n + # and therefore simplifies to: + # r^2 = sum_i ||x_i||^2 / n - ||c||^2 + sq_radius = new_ss / new_n - new_sq_norm + + if sq_radius <= threshold**2: + ( + self.n_samples_, + self.linear_sum_, + self.squared_sum_, + self.centroid_, + self.sq_norm_, + ) = (new_n, new_ls, new_ss, new_centroid, new_sq_norm) + return True + return False + + @property + def radius(self): + """Return radius of the subcluster""" + # Because of numerical issues, this could become negative + sq_radius = self.squared_sum_ / self.n_samples_ - self.sq_norm_ + return sqrt(max(0, sq_radius)) + + +class Birch( + ClassNamePrefixFeaturesOutMixin, ClusterMixin, TransformerMixin, BaseEstimator +): + """Implements the BIRCH clustering algorithm. + + It is a memory-efficient, online-learning algorithm provided as an + alternative to :class:`MiniBatchKMeans`. It constructs a tree + data structure with the cluster centroids being read off the leaf. + These can be either the final cluster centroids or can be provided as input + to another clustering algorithm such as :class:`AgglomerativeClustering`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.16 + + Parameters + ---------- + threshold : float, default=0.5 + The radius of the subcluster obtained by merging a new sample and the + closest subcluster should be lesser than the threshold. Otherwise a new + subcluster is started. Setting this value to be very low promotes + splitting and vice-versa. + + branching_factor : int, default=50 + Maximum number of CF subclusters in each node. If a new samples enters + such that the number of subclusters exceed the branching_factor then + that node is split into two nodes with the subclusters redistributed + in each. The parent subcluster of that node is removed and two new + subclusters are added as parents of the 2 split nodes. + + n_clusters : int, instance of sklearn.cluster model or None, default=3 + Number of clusters after the final clustering step, which treats the + subclusters from the leaves as new samples. + + - `None` : the final clustering step is not performed and the + subclusters are returned as they are. 
+ + - :mod:`sklearn.cluster` Estimator : If a model is provided, the model + is fit treating the subclusters as new samples and the initial data + is mapped to the label of the closest subcluster. + + - `int` : the model fit is :class:`AgglomerativeClustering` with + `n_clusters` set to be equal to the int. + + compute_labels : bool, default=True + Whether or not to compute labels for each fit. + + copy : bool, default=True + Whether or not to make a copy of the given data. If set to False, + the initial data will be overwritten. + + Attributes + ---------- + root_ : _CFNode + Root of the CFTree. + + dummy_leaf_ : _CFNode + Start pointer to all the leaves. + + subcluster_centers_ : ndarray + Centroids of all subclusters read directly from the leaves. + + subcluster_labels_ : ndarray + Labels assigned to the centroids of the subclusters after + they are clustered globally. + + labels_ : ndarray of shape (n_samples,) + Array of labels assigned to the input data. + if partial_fit is used instead of fit, they are assigned to the + last batch of data. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + MiniBatchKMeans : Alternative implementation that does incremental updates + of the centers' positions using mini-batches. + + Notes + ----- + The tree data structure consists of nodes with each node consisting of + a number of subclusters. The maximum number of subclusters in a node + is determined by the branching factor. Each subcluster maintains a + linear sum, squared sum and the number of samples in that subcluster. + In addition, each subcluster can also have a node as its child, if the + subcluster is not a member of a leaf node. + + For a new point entering the root, it is merged with the subcluster closest + to it and the linear sum, squared sum and the number of samples of that + subcluster are updated. This is done recursively till the properties of + the leaf node are updated. + + References + ---------- + * Tian Zhang, Raghu Ramakrishnan, Maron Livny + BIRCH: An efficient data clustering method for large databases. + https://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf + + * Roberto Perdisci + JBirch - Java implementation of BIRCH clustering algorithm + https://code.google.com/archive/p/jbirch + + Examples + -------- + >>> from sklearn.cluster import Birch + >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]] + >>> brc = Birch(n_clusters=None) + >>> brc.fit(X) + Birch(n_clusters=None) + >>> brc.predict(X) + array([0, 0, 0, 1, 1, 1]) + """ + + _parameter_constraints: dict = { + "threshold": [Interval(Real, 0.0, None, closed="neither")], + "branching_factor": [Interval(Integral, 1, None, closed="neither")], + "n_clusters": [None, ClusterMixin, Interval(Integral, 1, None, closed="left")], + "compute_labels": ["boolean"], + "copy": ["boolean"], + } + + def __init__( + self, + *, + threshold=0.5, + branching_factor=50, + n_clusters=3, + compute_labels=True, + copy=True, + ): + self.threshold = threshold + self.branching_factor = branching_factor + self.n_clusters = n_clusters + self.compute_labels = compute_labels + self.copy = copy + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """ + Build a CF Tree for the input data. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self + Fitted estimator. + """ + return self._fit(X, partial=False) + + def _fit(self, X, partial): + has_root = getattr(self, "root_", None) + first_call = not (partial and has_root) + + X = self._validate_data( + X, + accept_sparse="csr", + copy=self.copy, + reset=first_call, + dtype=[np.float64, np.float32], + ) + threshold = self.threshold + branching_factor = self.branching_factor + + n_samples, n_features = X.shape + + # If partial_fit is called for the first time or fit is called, we + # start a new tree. + if first_call: + # The first root is the leaf. Manipulate this object throughout. + self.root_ = _CFNode( + threshold=threshold, + branching_factor=branching_factor, + is_leaf=True, + n_features=n_features, + dtype=X.dtype, + ) + + # To enable getting back subclusters. + self.dummy_leaf_ = _CFNode( + threshold=threshold, + branching_factor=branching_factor, + is_leaf=True, + n_features=n_features, + dtype=X.dtype, + ) + self.dummy_leaf_.next_leaf_ = self.root_ + self.root_.prev_leaf_ = self.dummy_leaf_ + + # Cannot vectorize. Enough to convince to use cython. + if not sparse.issparse(X): + iter_func = iter + else: + iter_func = _iterate_sparse_X + + for sample in iter_func(X): + subcluster = _CFSubcluster(linear_sum=sample) + split = self.root_.insert_cf_subcluster(subcluster) + + if split: + new_subcluster1, new_subcluster2 = _split_node( + self.root_, threshold, branching_factor + ) + del self.root_ + self.root_ = _CFNode( + threshold=threshold, + branching_factor=branching_factor, + is_leaf=False, + n_features=n_features, + dtype=X.dtype, + ) + self.root_.append_subcluster(new_subcluster1) + self.root_.append_subcluster(new_subcluster2) + + centroids = np.concatenate([leaf.centroids_ for leaf in self._get_leaves()]) + self.subcluster_centers_ = centroids + self._n_features_out = self.subcluster_centers_.shape[0] + + self._global_clustering(X) + return self + + def _get_leaves(self): + """ + Retrieve the leaves of the CF Node. + + Returns + ------- + leaves : list of shape (n_leaves,) + List of the leaf nodes. + """ + leaf_ptr = self.dummy_leaf_.next_leaf_ + leaves = [] + while leaf_ptr is not None: + leaves.append(leaf_ptr) + leaf_ptr = leaf_ptr.next_leaf_ + return leaves + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X=None, y=None): + """ + Online learning. Prevents rebuilding of CFTree from scratch. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), \ + default=None + Input data. If X is not provided, only the global clustering + step is done. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self + Fitted estimator. + """ + if X is None: + # Perform just the final global clustering step. + self._global_clustering() + return self + else: + return self._fit(X, partial=True) + + def _check_fit(self, X): + check_is_fitted(self) + + if ( + hasattr(self, "subcluster_centers_") + and X.shape[1] != self.subcluster_centers_.shape[1] + ): + raise ValueError( + "Training data and predicted data do not have same number of features." + ) + + def predict(self, X): + """ + Predict data using the ``centroids_`` of subclusters. + + Avoid computation of the row norms of X. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data. + + Returns + ------- + labels : ndarray of shape(n_samples,) + Labelled data. + """ + check_is_fitted(self) + X = self._validate_data(X, accept_sparse="csr", reset=False) + return self._predict(X) + + def _predict(self, X): + """Predict data using the ``centroids_`` of subclusters.""" + kwargs = {"Y_norm_squared": self._subcluster_norms} + + with config_context(assume_finite=True): + argmin = pairwise_distances_argmin( + X, self.subcluster_centers_, metric_kwargs=kwargs + ) + return self.subcluster_labels_[argmin] + + def transform(self, X): + """ + Transform X into subcluster centroids dimension. + + Each dimension represents the distance from the sample point to each + cluster centroid. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data. + + Returns + ------- + X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters) + Transformed data. + """ + check_is_fitted(self) + X = self._validate_data(X, accept_sparse="csr", reset=False) + with config_context(assume_finite=True): + return euclidean_distances(X, self.subcluster_centers_) + + def _global_clustering(self, X=None): + """ + Global clustering for the subclusters obtained after fitting + """ + clusterer = self.n_clusters + centroids = self.subcluster_centers_ + compute_labels = (X is not None) and self.compute_labels + + # Preprocessing for the global clustering. + not_enough_centroids = False + if isinstance(clusterer, Integral): + clusterer = AgglomerativeClustering(n_clusters=self.n_clusters) + # There is no need to perform the global clustering step. + if len(centroids) < self.n_clusters: + not_enough_centroids = True + + # To use in predict to avoid recalculation. + self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True) + + if clusterer is None or not_enough_centroids: + self.subcluster_labels_ = np.arange(len(centroids)) + if not_enough_centroids: + warnings.warn( + "Number of subclusters found (%d) by BIRCH is less " + "than (%d). Decrease the threshold." + % (len(centroids), self.n_clusters), + ConvergenceWarning, + ) + else: + # The global clustering step that clusters the subclusters of + # the leaves. It assumes the centroids of the subclusters as + # samples and finds the final centroids. 
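+            # ``subcluster_labels_`` therefore labels the subcluster centroids,
+            # not the original samples; when ``compute_labels`` is True the
+            # samples are mapped to these labels below through ``_predict``
+            # (label of the nearest subcluster centroid).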
+ self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_) + + if compute_labels: + self.labels_ = self._predict(X) + + def _more_tags(self): + return {"preserves_dtype": [np.float64, np.float32]} diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_bisect_k_means.py b/venv/lib/python3.10/site-packages/sklearn/cluster/_bisect_k_means.py new file mode 100644 index 0000000000000000000000000000000000000000..a1f7716ced822cd9c9494d545daa67983c5e10d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/_bisect_k_means.py @@ -0,0 +1,529 @@ +"""Bisecting K-means clustering.""" +# Author: Michal Krawczyk + +import warnings + +import numpy as np +import scipy.sparse as sp + +from ..base import _fit_context +from ..utils._openmp_helpers import _openmp_effective_n_threads +from ..utils._param_validation import Integral, Interval, StrOptions +from ..utils.extmath import row_norms +from ..utils.validation import _check_sample_weight, check_is_fitted, check_random_state +from ._k_means_common import _inertia_dense, _inertia_sparse +from ._kmeans import ( + _BaseKMeans, + _kmeans_single_elkan, + _kmeans_single_lloyd, + _labels_inertia_threadpool_limit, +) + + +class _BisectingTree: + """Tree structure representing the hierarchical clusters of BisectingKMeans.""" + + def __init__(self, center, indices, score): + """Create a new cluster node in the tree. + + The node holds the center of this cluster and the indices of the data points + that belong to it. + """ + self.center = center + self.indices = indices + self.score = score + + self.left = None + self.right = None + + def split(self, labels, centers, scores): + """Split the cluster node into two subclusters.""" + self.left = _BisectingTree( + indices=self.indices[labels == 0], center=centers[0], score=scores[0] + ) + self.right = _BisectingTree( + indices=self.indices[labels == 1], center=centers[1], score=scores[1] + ) + + # reset the indices attribute to save memory + self.indices = None + + def get_cluster_to_bisect(self): + """Return the cluster node to bisect next. + + It's based on the score of the cluster, which can be either the number of + data points assigned to that cluster or the inertia of that cluster + (see `bisecting_strategy` for details). + """ + max_score = None + + for cluster_leaf in self.iter_leaves(): + if max_score is None or cluster_leaf.score > max_score: + max_score = cluster_leaf.score + best_cluster_leaf = cluster_leaf + + return best_cluster_leaf + + def iter_leaves(self): + """Iterate over all the cluster leaves in the tree.""" + if self.left is None: + yield self + else: + yield from self.left.iter_leaves() + yield from self.right.iter_leaves() + + +class BisectingKMeans(_BaseKMeans): + """Bisecting K-Means clustering. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.1 + + Parameters + ---------- + n_clusters : int, default=8 + The number of clusters to form as well as the number of + centroids to generate. + + init : {'k-means++', 'random'} or callable, default='random' + Method for initialization: + + 'k-means++' : selects initial cluster centers for k-mean + clustering in a smart way to speed up convergence. See section + Notes in k_init for more details. + + 'random': choose `n_clusters` observations (rows) at random from data + for the initial centroids. + + If a callable is passed, it should take arguments X, n_clusters and a + random state and return an initialization. 
+ + n_init : int, default=1 + Number of time the inner k-means algorithm will be run with different + centroid seeds in each bisection. + That will result producing for each bisection best output of n_init + consecutive runs in terms of inertia. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for centroid initialization + in inner K-Means. Use an int to make the randomness deterministic. + See :term:`Glossary `. + + max_iter : int, default=300 + Maximum number of iterations of the inner k-means algorithm at each + bisection. + + verbose : int, default=0 + Verbosity mode. + + tol : float, default=1e-4 + Relative tolerance with regards to Frobenius norm of the difference + in the cluster centers of two consecutive iterations to declare + convergence. Used in inner k-means algorithm at each bisection to pick + best possible clusters. + + copy_x : bool, default=True + When pre-computing distances it is more numerically accurate to center + the data first. If copy_x is True (default), then the original data is + not modified. If False, the original data is modified, and put back + before the function returns, but small numerical differences may be + introduced by subtracting and then adding the data mean. Note that if + the original data is not C-contiguous, a copy will be made even if + copy_x is False. If the original data is sparse, but not in CSR format, + a copy will be made even if copy_x is False. + + algorithm : {"lloyd", "elkan"}, default="lloyd" + Inner K-means algorithm used in bisection. + The classical EM-style algorithm is `"lloyd"`. + The `"elkan"` variation can be more efficient on some datasets with + well-defined clusters, by using the triangle inequality. However it's + more memory intensive due to the allocation of an extra array of shape + `(n_samples, n_clusters)`. + + bisecting_strategy : {"biggest_inertia", "largest_cluster"},\ + default="biggest_inertia" + Defines how bisection should be performed: + + - "biggest_inertia" means that BisectingKMeans will always check + all calculated cluster for cluster with biggest SSE + (Sum of squared errors) and bisect it. This approach concentrates on + precision, but may be costly in terms of execution time (especially for + larger amount of data points). + + - "largest_cluster" - BisectingKMeans will always split cluster with + largest amount of points assigned to it from all clusters + previously calculated. That should work faster than picking by SSE + ('biggest_inertia') and may produce similar results in most cases. + + Attributes + ---------- + cluster_centers_ : ndarray of shape (n_clusters, n_features) + Coordinates of cluster centers. If the algorithm stops before fully + converging (see ``tol`` and ``max_iter``), these will not be + consistent with ``labels_``. + + labels_ : ndarray of shape (n_samples,) + Labels of each point. + + inertia_ : float + Sum of squared distances of samples to their closest cluster center, + weighted by the sample weights if provided. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + See Also + -------- + KMeans : Original implementation of K-Means algorithm. + + Notes + ----- + It might be inefficient when n_cluster is less than 3, due to unnecessary + calculations for that case. 
+ + Examples + -------- + >>> from sklearn.cluster import BisectingKMeans + >>> import numpy as np + >>> X = np.array([[1, 1], [10, 1], [3, 1], + ... [10, 0], [2, 1], [10, 2], + ... [10, 8], [10, 9], [10, 10]]) + >>> bisect_means = BisectingKMeans(n_clusters=3, random_state=0).fit(X) + >>> bisect_means.labels_ + array([0, 2, 0, 2, 0, 2, 1, 1, 1], dtype=int32) + >>> bisect_means.predict([[0, 0], [12, 3]]) + array([0, 2], dtype=int32) + >>> bisect_means.cluster_centers_ + array([[ 2., 1.], + [10., 9.], + [10., 1.]]) + """ + + _parameter_constraints: dict = { + **_BaseKMeans._parameter_constraints, + "init": [StrOptions({"k-means++", "random"}), callable], + "n_init": [Interval(Integral, 1, None, closed="left")], + "copy_x": ["boolean"], + "algorithm": [StrOptions({"lloyd", "elkan"})], + "bisecting_strategy": [StrOptions({"biggest_inertia", "largest_cluster"})], + } + + def __init__( + self, + n_clusters=8, + *, + init="random", + n_init=1, + random_state=None, + max_iter=300, + verbose=0, + tol=1e-4, + copy_x=True, + algorithm="lloyd", + bisecting_strategy="biggest_inertia", + ): + super().__init__( + n_clusters=n_clusters, + init=init, + max_iter=max_iter, + verbose=verbose, + random_state=random_state, + tol=tol, + n_init=n_init, + ) + + self.copy_x = copy_x + self.algorithm = algorithm + self.bisecting_strategy = bisecting_strategy + + def _warn_mkl_vcomp(self, n_active_threads): + """Warn when vcomp and mkl are both present""" + warnings.warn( + "BisectingKMeans is known to have a memory leak on Windows " + "with MKL, when there are less chunks than available " + "threads. You can avoid it by setting the environment" + f" variable OMP_NUM_THREADS={n_active_threads}." + ) + + def _inertia_per_cluster(self, X, centers, labels, sample_weight): + """Calculate the sum of squared errors (inertia) per cluster. + + Parameters + ---------- + X : {ndarray, csr_matrix} of shape (n_samples, n_features) + The input samples. + + centers : ndarray of shape (n_clusters=2, n_features) + The cluster centers. + + labels : ndarray of shape (n_samples,) + Index of the cluster each sample belongs to. + + sample_weight : ndarray of shape (n_samples,) + The weights for each observation in X. + + Returns + ------- + inertia_per_cluster : ndarray of shape (n_clusters=2,) + Sum of squared errors (inertia) for each cluster. + """ + n_clusters = centers.shape[0] # = 2 since centers comes from a bisection + _inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense + + inertia_per_cluster = np.empty(n_clusters) + for label in range(n_clusters): + inertia_per_cluster[label] = _inertia( + X, sample_weight, centers, labels, self._n_threads, single_label=label + ) + + return inertia_per_cluster + + def _bisect(self, X, x_squared_norms, sample_weight, cluster_to_bisect): + """Split a cluster into 2 subsclusters. + + Parameters + ---------- + X : {ndarray, csr_matrix} of shape (n_samples, n_features) + Training instances to cluster. + + x_squared_norms : ndarray of shape (n_samples,) + Squared euclidean norm of each data point. + + sample_weight : ndarray of shape (n_samples,) + The weights for each observation in X. + + cluster_to_bisect : _BisectingTree node object + The cluster node to split. + """ + X = X[cluster_to_bisect.indices] + x_squared_norms = x_squared_norms[cluster_to_bisect.indices] + sample_weight = sample_weight[cluster_to_bisect.indices] + + best_inertia = None + + # Split samples in X into 2 clusters. 
+ # Repeating `n_init` times to obtain best clusters + for _ in range(self.n_init): + centers_init = self._init_centroids( + X, + x_squared_norms=x_squared_norms, + init=self.init, + random_state=self._random_state, + n_centroids=2, + sample_weight=sample_weight, + ) + + labels, inertia, centers, _ = self._kmeans_single( + X, + sample_weight, + centers_init, + max_iter=self.max_iter, + verbose=self.verbose, + tol=self.tol, + n_threads=self._n_threads, + ) + + # allow small tolerance on the inertia to accommodate for + # non-deterministic rounding errors due to parallel computation + if best_inertia is None or inertia < best_inertia * (1 - 1e-6): + best_labels = labels + best_centers = centers + best_inertia = inertia + + if self.verbose: + print(f"New centroids from bisection: {best_centers}") + + if self.bisecting_strategy == "biggest_inertia": + scores = self._inertia_per_cluster( + X, best_centers, best_labels, sample_weight + ) + else: # bisecting_strategy == "largest_cluster" + # Using minlength to make sure that we have the counts for both labels even + # if all samples are labelled 0. + scores = np.bincount(best_labels, minlength=2) + + cluster_to_bisect.split(best_labels, best_centers, scores) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None, sample_weight=None): + """Compute bisecting k-means clustering. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + + Training instances to cluster. + + .. note:: The data will be converted to C ordering, + which will cause a memory copy + if the given data is not C-contiguous. + + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + The weights for each observation in X. If None, all observations + are assigned equal weight. `sample_weight` is not used during + initialization if `init` is a callable. + + Returns + ------- + self + Fitted estimator. 
+ """ + X = self._validate_data( + X, + accept_sparse="csr", + dtype=[np.float64, np.float32], + order="C", + copy=self.copy_x, + accept_large_sparse=False, + ) + + self._check_params_vs_input(X) + + self._random_state = check_random_state(self.random_state) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + self._n_threads = _openmp_effective_n_threads() + + if self.algorithm == "lloyd" or self.n_clusters == 1: + self._kmeans_single = _kmeans_single_lloyd + self._check_mkl_vcomp(X, X.shape[0]) + else: + self._kmeans_single = _kmeans_single_elkan + + # Subtract of mean of X for more accurate distance computations + if not sp.issparse(X): + self._X_mean = X.mean(axis=0) + X -= self._X_mean + + # Initialize the hierarchical clusters tree + self._bisecting_tree = _BisectingTree( + indices=np.arange(X.shape[0]), + center=X.mean(axis=0), + score=0, + ) + + x_squared_norms = row_norms(X, squared=True) + + for _ in range(self.n_clusters - 1): + # Chose cluster to bisect + cluster_to_bisect = self._bisecting_tree.get_cluster_to_bisect() + + # Split this cluster into 2 subclusters + self._bisect(X, x_squared_norms, sample_weight, cluster_to_bisect) + + # Aggregate final labels and centers from the bisecting tree + self.labels_ = np.full(X.shape[0], -1, dtype=np.int32) + self.cluster_centers_ = np.empty((self.n_clusters, X.shape[1]), dtype=X.dtype) + + for i, cluster_node in enumerate(self._bisecting_tree.iter_leaves()): + self.labels_[cluster_node.indices] = i + self.cluster_centers_[i] = cluster_node.center + cluster_node.label = i # label final clusters for future prediction + cluster_node.indices = None # release memory + + # Restore original data + if not sp.issparse(X): + X += self._X_mean + self.cluster_centers_ += self._X_mean + + _inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense + self.inertia_ = _inertia( + X, sample_weight, self.cluster_centers_, self.labels_, self._n_threads + ) + + self._n_features_out = self.cluster_centers_.shape[0] + + return self + + def predict(self, X): + """Predict which cluster each sample in X belongs to. + + Prediction is made by going down the hierarchical tree + in searching of closest leaf cluster. + + In the vector quantization literature, `cluster_centers_` is called + the code book and each value returned by `predict` is the index of + the closest code in the code book. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + New data to predict. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Index of the cluster each sample belongs to. + """ + check_is_fitted(self) + + X = self._check_test_data(X) + x_squared_norms = row_norms(X, squared=True) + + # sample weights are unused but necessary in cython helpers + sample_weight = np.ones_like(x_squared_norms) + + labels = self._predict_recursive(X, sample_weight, self._bisecting_tree) + + return labels + + def _predict_recursive(self, X, sample_weight, cluster_node): + """Predict recursively by going down the hierarchical tree. + + Parameters + ---------- + X : {ndarray, csr_matrix} of shape (n_samples, n_features) + The data points, currently assigned to `cluster_node`, to predict between + the subclusters of this node. + + sample_weight : ndarray of shape (n_samples,) + The weights for each observation in X. + + cluster_node : _BisectingTree node object + The cluster node of the hierarchical tree. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Index of the cluster each sample belongs to. 
+ """ + if cluster_node.left is None: + # This cluster has no subcluster. Labels are just the label of the cluster. + return np.full(X.shape[0], cluster_node.label, dtype=np.int32) + + # Determine if data points belong to the left or right subcluster + centers = np.vstack((cluster_node.left.center, cluster_node.right.center)) + if hasattr(self, "_X_mean"): + centers += self._X_mean + + cluster_labels = _labels_inertia_threadpool_limit( + X, + sample_weight, + centers, + self._n_threads, + return_inertia=False, + ) + mask = cluster_labels == 0 + + # Compute the labels for each subset of the data points. + labels = np.full(X.shape[0], -1, dtype=np.int32) + + labels[mask] = self._predict_recursive( + X[mask], sample_weight[mask], cluster_node.left + ) + + labels[~mask] = self._predict_recursive( + X[~mask], sample_weight[~mask], cluster_node.right + ) + + return labels + + def _more_tags(self): + return {"preserves_dtype": [np.float64, np.float32]} diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_dbscan.py b/venv/lib/python3.10/site-packages/sklearn/cluster/_dbscan.py new file mode 100644 index 0000000000000000000000000000000000000000..fbcbd73dfbb3bbf4e27243a5d266cb0fd19bd276 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/_dbscan.py @@ -0,0 +1,476 @@ +""" +DBSCAN: Density-Based Spatial Clustering of Applications with Noise +""" + +# Author: Robert Layton +# Joel Nothman +# Lars Buitinck +# +# License: BSD 3 clause + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import sparse + +from ..base import BaseEstimator, ClusterMixin, _fit_context +from ..metrics.pairwise import _VALID_METRICS +from ..neighbors import NearestNeighbors +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.validation import _check_sample_weight +from ._dbscan_inner import dbscan_inner + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "sample_weight": ["array-like", None], + }, + prefer_skip_nested_validation=False, +) +def dbscan( + X, + eps=0.5, + *, + min_samples=5, + metric="minkowski", + metric_params=None, + algorithm="auto", + leaf_size=30, + p=2, + sample_weight=None, + n_jobs=None, +): + """Perform DBSCAN clustering from vector array or distance matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse (CSR) matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) + A feature array, or array of distances between samples if + ``metric='precomputed'``. + + eps : float, default=0.5 + The maximum distance between two samples for one to be considered + as in the neighborhood of the other. This is not a maximum bound + on the distances of points within a cluster. This is the most + important DBSCAN parameter to choose appropriately for your data set + and distance function. + + min_samples : int, default=5 + The number of samples (or total weight) in a neighborhood for a point + to be considered as a core point. This includes the point itself. + + metric : str or callable, default='minkowski' + The metric to use when calculating distance between instances in a + feature array. If metric is a string or callable, it must be one of + the options allowed by :func:`sklearn.metrics.pairwise_distances` for + its metric parameter. + If metric is "precomputed", X is assumed to be a distance matrix and + must be square during fit. + X may be a :term:`sparse graph `, + in which case only "nonzero" elements may be considered neighbors. 
+ + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + .. versionadded:: 0.19 + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + The algorithm to be used by the NearestNeighbors module + to compute pointwise distances and find nearest neighbors. + See NearestNeighbors module documentation for details. + + leaf_size : int, default=30 + Leaf size passed to BallTree or cKDTree. This can affect the speed + of the construction and query, as well as the memory required + to store the tree. The optimal value depends + on the nature of the problem. + + p : float, default=2 + The power of the Minkowski metric to be used to calculate distance + between points. + + sample_weight : array-like of shape (n_samples,), default=None + Weight of each sample, such that a sample with a weight of at least + ``min_samples`` is by itself a core sample; a sample with negative + weight may inhibit its eps-neighbor from being core. + Note that weights are absolute, and default to 1. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. ``None`` means + 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means + using all processors. See :term:`Glossary ` for more details. + If precomputed distance are used, parallel execution is not available + and thus n_jobs will have no effect. + + Returns + ------- + core_samples : ndarray of shape (n_core_samples,) + Indices of core samples. + + labels : ndarray of shape (n_samples,) + Cluster labels for each point. Noisy samples are given the label -1. + + See Also + -------- + DBSCAN : An estimator interface for this clustering algorithm. + OPTICS : A similar estimator interface clustering at multiple values of + eps. Our implementation is optimized for memory usage. + + Notes + ----- + For an example, see :ref:`examples/cluster/plot_dbscan.py + `. + + This implementation bulk-computes all neighborhood queries, which increases + the memory complexity to O(n.d) where d is the average number of neighbors, + while original DBSCAN had memory complexity O(n). It may attract a higher + memory complexity when querying these nearest neighborhoods, depending + on the ``algorithm``. + + One way to avoid the query complexity is to pre-compute sparse + neighborhoods in chunks using + :func:`NearestNeighbors.radius_neighbors_graph + ` with + ``mode='distance'``, then using ``metric='precomputed'`` here. + + Another way to reduce memory and computation time is to remove + (near-)duplicate points and use ``sample_weight`` instead. + + :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower + memory usage. + + References + ---------- + Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based + Algorithm for Discovering Clusters in Large Spatial Databases with Noise" + `_. + In: Proceedings of the 2nd International Conference on Knowledge Discovery + and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 + + Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017). + :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN." + <10.1145/3068335>` + ACM Transactions on Database Systems (TODS), 42(3), 19. 
+ + Examples + -------- + >>> from sklearn.cluster import dbscan + >>> X = [[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]] + >>> core_samples, labels = dbscan(X, eps=3, min_samples=2) + >>> core_samples + array([0, 1, 2, 3, 4]) + >>> labels + array([ 0, 0, 0, 1, 1, -1]) + """ + + est = DBSCAN( + eps=eps, + min_samples=min_samples, + metric=metric, + metric_params=metric_params, + algorithm=algorithm, + leaf_size=leaf_size, + p=p, + n_jobs=n_jobs, + ) + est.fit(X, sample_weight=sample_weight) + return est.core_sample_indices_, est.labels_ + + +class DBSCAN(ClusterMixin, BaseEstimator): + """Perform DBSCAN clustering from vector array or distance matrix. + + DBSCAN - Density-Based Spatial Clustering of Applications with Noise. + Finds core samples of high density and expands clusters from them. + Good for data which contains clusters of similar density. + + The worst case memory complexity of DBSCAN is :math:`O({n}^2)`, which can + occur when the `eps` param is large and `min_samples` is low. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + eps : float, default=0.5 + The maximum distance between two samples for one to be considered + as in the neighborhood of the other. This is not a maximum bound + on the distances of points within a cluster. This is the most + important DBSCAN parameter to choose appropriately for your data set + and distance function. + + min_samples : int, default=5 + The number of samples (or total weight) in a neighborhood for a point to + be considered as a core point. This includes the point itself. If + `min_samples` is set to a higher value, DBSCAN will find denser clusters, + whereas if it is set to a lower value, the found clusters will be more + sparse. + + metric : str, or callable, default='euclidean' + The metric to use when calculating distance between instances in a + feature array. If metric is a string or callable, it must be one of + the options allowed by :func:`sklearn.metrics.pairwise_distances` for + its metric parameter. + If metric is "precomputed", X is assumed to be a distance matrix and + must be square. X may be a :term:`sparse graph`, in which + case only "nonzero" elements may be considered neighbors for DBSCAN. + + .. versionadded:: 0.17 + metric *precomputed* to accept precomputed sparse matrix. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + .. versionadded:: 0.19 + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + The algorithm to be used by the NearestNeighbors module + to compute pointwise distances and find nearest neighbors. + See NearestNeighbors module documentation for details. + + leaf_size : int, default=30 + Leaf size passed to BallTree or cKDTree. This can affect the speed + of the construction and query, as well as the memory required + to store the tree. The optimal value depends + on the nature of the problem. + + p : float, default=None + The power of the Minkowski metric to be used to calculate distance + between points. If None, then ``p=2`` (equivalent to the Euclidean + distance). + + n_jobs : int, default=None + The number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + core_sample_indices_ : ndarray of shape (n_core_samples,) + Indices of core samples. + + components_ : ndarray of shape (n_core_samples, n_features) + Copy of each core sample found by training. 
+ + labels_ : ndarray of shape (n_samples) + Cluster labels for each point in the dataset given to fit(). + Noisy samples are given the label -1. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + OPTICS : A similar clustering at multiple values of eps. Our implementation + is optimized for memory usage. + + Notes + ----- + For an example, see :ref:`examples/cluster/plot_dbscan.py + `. + + This implementation bulk-computes all neighborhood queries, which increases + the memory complexity to O(n.d) where d is the average number of neighbors, + while original DBSCAN had memory complexity O(n). It may attract a higher + memory complexity when querying these nearest neighborhoods, depending + on the ``algorithm``. + + One way to avoid the query complexity is to pre-compute sparse + neighborhoods in chunks using + :func:`NearestNeighbors.radius_neighbors_graph + ` with + ``mode='distance'``, then using ``metric='precomputed'`` here. + + Another way to reduce memory and computation time is to remove + (near-)duplicate points and use ``sample_weight`` instead. + + :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower memory + usage. + + References + ---------- + Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based + Algorithm for Discovering Clusters in Large Spatial Databases with Noise" + `_. + In: Proceedings of the 2nd International Conference on Knowledge Discovery + and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 + + Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017). + :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN." + <10.1145/3068335>` + ACM Transactions on Database Systems (TODS), 42(3), 19. + + Examples + -------- + >>> from sklearn.cluster import DBSCAN + >>> import numpy as np + >>> X = np.array([[1, 2], [2, 2], [2, 3], + ... [8, 7], [8, 8], [25, 80]]) + >>> clustering = DBSCAN(eps=3, min_samples=2).fit(X) + >>> clustering.labels_ + array([ 0, 0, 0, 1, 1, -1]) + >>> clustering + DBSCAN(eps=3, min_samples=2) + """ + + _parameter_constraints: dict = { + "eps": [Interval(Real, 0.0, None, closed="neither")], + "min_samples": [Interval(Integral, 1, None, closed="left")], + "metric": [ + StrOptions(set(_VALID_METRICS) | {"precomputed"}), + callable, + ], + "metric_params": [dict, None], + "algorithm": [StrOptions({"auto", "ball_tree", "kd_tree", "brute"})], + "leaf_size": [Interval(Integral, 1, None, closed="left")], + "p": [Interval(Real, 0.0, None, closed="left"), None], + "n_jobs": [Integral, None], + } + + def __init__( + self, + eps=0.5, + *, + min_samples=5, + metric="euclidean", + metric_params=None, + algorithm="auto", + leaf_size=30, + p=None, + n_jobs=None, + ): + self.eps = eps + self.min_samples = min_samples + self.metric = metric + self.metric_params = metric_params + self.algorithm = algorithm + self.leaf_size = leaf_size + self.p = p + self.n_jobs = n_jobs + + @_fit_context( + # DBSCAN.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None, sample_weight=None): + """Perform DBSCAN clustering from features, or distance matrix. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), or \ + (n_samples, n_samples) + Training instances to cluster, or distances between instances if + ``metric='precomputed'``. If a sparse matrix is provided, it will + be converted into a sparse ``csr_matrix``. + + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + Weight of each sample, such that a sample with a weight of at least + ``min_samples`` is by itself a core sample; a sample with a + negative weight may inhibit its eps-neighbor from being core. + Note that weights are absolute, and default to 1. + + Returns + ------- + self : object + Returns a fitted instance of self. + """ + X = self._validate_data(X, accept_sparse="csr") + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + # Calculate neighborhood for all samples. This leaves the original + # point in, which needs to be considered later (i.e. point i is in the + # neighborhood of point i. While True, its useless information) + if self.metric == "precomputed" and sparse.issparse(X): + # set the diagonal to explicit values, as a point is its own + # neighbor + X = X.copy() # copy to avoid in-place modification + with warnings.catch_warnings(): + warnings.simplefilter("ignore", sparse.SparseEfficiencyWarning) + X.setdiag(X.diagonal()) + + neighbors_model = NearestNeighbors( + radius=self.eps, + algorithm=self.algorithm, + leaf_size=self.leaf_size, + metric=self.metric, + metric_params=self.metric_params, + p=self.p, + n_jobs=self.n_jobs, + ) + neighbors_model.fit(X) + # This has worst case O(n^2) memory complexity + neighborhoods = neighbors_model.radius_neighbors(X, return_distance=False) + + if sample_weight is None: + n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods]) + else: + n_neighbors = np.array( + [np.sum(sample_weight[neighbors]) for neighbors in neighborhoods] + ) + + # Initially, all samples are noise. + labels = np.full(X.shape[0], -1, dtype=np.intp) + + # A list of all core samples found. + core_samples = np.asarray(n_neighbors >= self.min_samples, dtype=np.uint8) + dbscan_inner(core_samples, neighborhoods, labels) + + self.core_sample_indices_ = np.where(core_samples)[0] + self.labels_ = labels + + if len(self.core_sample_indices_): + # fix for scipy sparse indexing issue + self.components_ = X[self.core_sample_indices_].copy() + else: + # no core samples + self.components_ = np.empty((0, X.shape[1])) + return self + + def fit_predict(self, X, y=None, sample_weight=None): + """Compute clusters from a data or distance matrix and predict labels. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), or \ + (n_samples, n_samples) + Training instances to cluster, or distances between instances if + ``metric='precomputed'``. If a sparse matrix is provided, it will + be converted into a sparse ``csr_matrix``. + + y : Ignored + Not used, present here for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + Weight of each sample, such that a sample with a weight of at least + ``min_samples`` is by itself a core sample; a sample with a + negative weight may inhibit its eps-neighbor from being core. + Note that weights are absolute, and default to 1. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Cluster labels. Noisy samples are given the label -1. 
+ """ + self.fit(X, sample_weight=sample_weight) + return self.labels_ + + def _more_tags(self): + return {"pairwise": self.metric == "precomputed"} diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_feature_agglomeration.py b/venv/lib/python3.10/site-packages/sklearn/cluster/_feature_agglomeration.py new file mode 100644 index 0000000000000000000000000000000000000000..f84f18c1c18b3b71f4efcab8445c93f56a609318 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/_feature_agglomeration.py @@ -0,0 +1,104 @@ +""" +Feature agglomeration. Base classes and functions for performing feature +agglomeration. +""" +# Author: V. Michel, A. Gramfort +# License: BSD 3 clause + +import warnings + +import numpy as np +from scipy.sparse import issparse + +from ..base import TransformerMixin +from ..utils import metadata_routing +from ..utils.validation import check_is_fitted + +############################################################################### +# Mixin class for feature agglomeration. + + +class AgglomerationTransform(TransformerMixin): + """ + A class for feature agglomeration via the transform interface. + """ + + # This prevents ``set_split_inverse_transform`` to be generated for the + # non-standard ``Xred`` arg on ``inverse_transform``. + # TODO(1.5): remove when Xred is removed for inverse_transform. + __metadata_request__inverse_transform = {"Xred": metadata_routing.UNUSED} + + def transform(self, X): + """ + Transform a new matrix using the built clustering. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or \ + (n_samples, n_samples) + A M by N array of M observations in N dimensions or a length + M array of M one-dimensional observations. + + Returns + ------- + Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,) + The pooled values for each feature cluster. + """ + check_is_fitted(self) + + X = self._validate_data(X, reset=False) + if self.pooling_func == np.mean and not issparse(X): + size = np.bincount(self.labels_) + n_samples = X.shape[0] + # a fast way to compute the mean of grouped features + nX = np.array( + [np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)] + ) + else: + nX = [ + self.pooling_func(X[:, self.labels_ == l], axis=1) + for l in np.unique(self.labels_) + ] + nX = np.array(nX).T + return nX + + def inverse_transform(self, Xt=None, Xred=None): + """ + Inverse the transformation and return a vector of size `n_features`. + + Parameters + ---------- + Xt : array-like of shape (n_samples, n_clusters) or (n_clusters,) + The values to be assigned to each cluster of samples. + + Xred : deprecated + Use `Xt` instead. + + .. deprecated:: 1.3 + + Returns + ------- + X : ndarray of shape (n_samples, n_features) or (n_features,) + A vector of size `n_samples` with the values of `Xred` assigned to + each of the cluster of samples. + """ + if Xt is None and Xred is None: + raise TypeError("Missing required positional argument: Xt") + + if Xred is not None and Xt is not None: + raise ValueError("Please provide only `Xt`, and not `Xred`.") + + if Xred is not None: + warnings.warn( + ( + "Input argument `Xred` was renamed to `Xt` in v1.3 and will be" + " removed in v1.5." 
+ ), + FutureWarning, + ) + Xt = Xred + + check_is_fitted(self) + + unil, inverse = np.unique(self.labels_, return_inverse=True) + return Xt[..., inverse] diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..54c057631c1b32be60da76c17e56bc71ddb90d29 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.pxd b/venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.pxd new file mode 100644 index 0000000000000000000000000000000000000000..9a41ea68d1bafc0cad55c028e0413e463ddb6d2e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_common.pxd @@ -0,0 +1,48 @@ +from cython cimport floating + + +cdef floating _euclidean_dense_dense( + const floating*, + const floating*, + int, + bint +) noexcept nogil + +cdef floating _euclidean_sparse_dense( + const floating[::1], + const int[::1], + const floating[::1], + floating, + bint +) noexcept nogil + +cpdef void _relocate_empty_clusters_dense( + const floating[:, ::1], + const floating[::1], + const floating[:, ::1], + floating[:, ::1], + floating[::1], + const int[::1] +) + +cpdef void _relocate_empty_clusters_sparse( + const floating[::1], + const int[::1], + const int[::1], + const floating[::1], + const floating[:, ::1], + floating[:, ::1], + floating[::1], + const int[::1] +) + +cdef void _average_centers( + floating[:, ::1], + const floating[::1] +) + +cdef void _center_shift( + const floating[:, ::1], + const floating[:, ::1], + floating[::1] +) diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_elkan.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_elkan.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..175a86ac7952c024ac29981bba5f8b26def1d51c Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_elkan.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_lloyd.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_lloyd.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e028fd42843dfb803237c8284bd508654a7de8c4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_lloyd.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_minibatch.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_minibatch.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ce010c7e3fef2c62e9e0604d8ddd9f111df630f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/cluster/_k_means_minibatch.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_mean_shift.py b/venv/lib/python3.10/site-packages/sklearn/cluster/_mean_shift.py new file mode 100644 index 0000000000000000000000000000000000000000..fae11cca7df233963c7de73f42f5706ef7caf4c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/_mean_shift.py @@ -0,0 
+1,575 @@ +"""Mean shift clustering algorithm. + +Mean shift clustering aims to discover *blobs* in a smooth density of +samples. It is a centroid based algorithm, which works by updating candidates +for centroids to be the mean of the points within a given region. These +candidates are then filtered in a post-processing stage to eliminate +near-duplicates to form the final set of centroids. + +Seeding is performed using a binning technique for scalability. +""" + +# Authors: Conrad Lee +# Alexandre Gramfort +# Gael Varoquaux +# Martino Sorbaro + +import warnings +from collections import defaultdict +from numbers import Integral, Real + +import numpy as np + +from .._config import config_context +from ..base import BaseEstimator, ClusterMixin, _fit_context +from ..metrics.pairwise import pairwise_distances_argmin +from ..neighbors import NearestNeighbors +from ..utils import check_array, check_random_state, gen_batches +from ..utils._param_validation import Interval, validate_params +from ..utils.parallel import Parallel, delayed +from ..utils.validation import check_is_fitted + + +@validate_params( + { + "X": ["array-like"], + "quantile": [Interval(Real, 0, 1, closed="both")], + "n_samples": [Interval(Integral, 1, None, closed="left"), None], + "random_state": ["random_state"], + "n_jobs": [Integral, None], + }, + prefer_skip_nested_validation=True, +) +def estimate_bandwidth(X, *, quantile=0.3, n_samples=None, random_state=0, n_jobs=None): + """Estimate the bandwidth to use with the mean-shift algorithm. + + This function takes time at least quadratic in `n_samples`. For large + datasets, it is wise to subsample by setting `n_samples`. Alternatively, + the parameter `bandwidth` can be set to a small value without estimating + it. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input points. + + quantile : float, default=0.3 + Should be between [0, 1] + 0.5 means that the median of all pairwise distances is used. + + n_samples : int, default=None + The number of samples to use. If not given, all samples are used. + + random_state : int, RandomState instance, default=None + The generator used to randomly select the samples from input points + for bandwidth estimation. Use an int to make the randomness + deterministic. + See :term:`Glossary `. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Returns + ------- + bandwidth : float + The bandwidth parameter. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.cluster import estimate_bandwidth + >>> X = np.array([[1, 1], [2, 1], [1, 0], + ... [4, 7], [3, 5], [3, 6]]) + >>> estimate_bandwidth(X, quantile=0.5) + 1.61... 
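Ignoring the subsampling and the batched evaluation in the body below, the heuristic reduces to the mean distance from each point to its k-th nearest neighbour, with k set by ``quantile``. A rough NumPy sketch of that reduction (the helper name is illustrative):

import numpy as np
from sklearn.neighbors import NearestNeighbors

def rough_bandwidth(X, quantile=0.3):
    # k-th nearest-neighbour distance (self included), averaged over the data
    k = max(1, int(X.shape[0] * quantile))
    dist, _ = NearestNeighbors(n_neighbors=k).fit(X).kneighbors(X)
    return dist.max(axis=1).mean()

X = np.array([[1, 1], [2, 1], [1, 0], [4, 7], [3, 5], [3, 6]], dtype=float)
rough_bandwidth(X, quantile=0.5)  # matches the 1.61... value shown above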
+ """ + X = check_array(X) + + random_state = check_random_state(random_state) + if n_samples is not None: + idx = random_state.permutation(X.shape[0])[:n_samples] + X = X[idx] + n_neighbors = int(X.shape[0] * quantile) + if n_neighbors < 1: # cannot fit NearestNeighbors with n_neighbors = 0 + n_neighbors = 1 + nbrs = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=n_jobs) + nbrs.fit(X) + + bandwidth = 0.0 + for batch in gen_batches(len(X), 500): + d, _ = nbrs.kneighbors(X[batch, :], return_distance=True) + bandwidth += np.max(d, axis=1).sum() + + return bandwidth / X.shape[0] + + +# separate function for each seed's iterative loop +def _mean_shift_single_seed(my_mean, X, nbrs, max_iter): + # For each seed, climb gradient until convergence or max_iter + bandwidth = nbrs.get_params()["radius"] + stop_thresh = 1e-3 * bandwidth # when mean has converged + completed_iterations = 0 + while True: + # Find mean of points within bandwidth + i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth, return_distance=False)[0] + points_within = X[i_nbrs] + if len(points_within) == 0: + break # Depending on seeding strategy this condition may occur + my_old_mean = my_mean # save the old mean + my_mean = np.mean(points_within, axis=0) + # If converged or at max_iter, adds the cluster + if ( + np.linalg.norm(my_mean - my_old_mean) < stop_thresh + or completed_iterations == max_iter + ): + break + completed_iterations += 1 + return tuple(my_mean), len(points_within), completed_iterations + + +@validate_params( + {"X": ["array-like"]}, + prefer_skip_nested_validation=False, +) +def mean_shift( + X, + *, + bandwidth=None, + seeds=None, + bin_seeding=False, + min_bin_freq=1, + cluster_all=True, + max_iter=300, + n_jobs=None, +): + """Perform mean shift clustering of data using a flat kernel. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + + X : array-like of shape (n_samples, n_features) + Input data. + + bandwidth : float, default=None + Kernel bandwidth. If not None, must be in the range [0, +inf). + + If None, the bandwidth is determined using a heuristic based on + the median of all pairwise distances. This will take quadratic time in + the number of samples. The sklearn.cluster.estimate_bandwidth function + can be used to do this more efficiently. + + seeds : array-like of shape (n_seeds, n_features) or None + Point used as initial kernel locations. If None and bin_seeding=False, + each data point is used as a seed. If None and bin_seeding=True, + see bin_seeding. + + bin_seeding : bool, default=False + If true, initial kernel locations are not locations of all + points, but rather the location of the discretized version of + points, where points are binned onto a grid whose coarseness + corresponds to the bandwidth. Setting this option to True will speed + up the algorithm because fewer seeds will be initialized. + Ignored if seeds argument is not None. + + min_bin_freq : int, default=1 + To speed up the algorithm, accept only those bins with at least + min_bin_freq points as seeds. + + cluster_all : bool, default=True + If true, then all points are clustered, even those orphans that are + not within any kernel. Orphans are assigned to the nearest kernel. + If false, then orphans are given cluster label -1. + + max_iter : int, default=300 + Maximum number of iterations, per seed point before the clustering + operation terminates (for that seed point), if has not converged yet. + + n_jobs : int, default=None + The number of jobs to use for the computation. 
The following tasks benefit + from the parallelization: + + - The search of nearest neighbors for bandwidth estimation and label + assignments. See the details in the docstring of the + ``NearestNeighbors`` class. + - Hill-climbing optimization for all seeds. + + See :term:`Glossary ` for more details. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionadded:: 0.17 + Parallel Execution using *n_jobs*. + + Returns + ------- + + cluster_centers : ndarray of shape (n_clusters, n_features) + Coordinates of cluster centers. + + labels : ndarray of shape (n_samples,) + Cluster labels for each point. + + Notes + ----- + For an example, see :ref:`examples/cluster/plot_mean_shift.py + `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.cluster import mean_shift + >>> X = np.array([[1, 1], [2, 1], [1, 0], + ... [4, 7], [3, 5], [3, 6]]) + >>> cluster_centers, labels = mean_shift(X, bandwidth=2) + >>> cluster_centers + array([[3.33..., 6. ], + [1.33..., 0.66...]]) + >>> labels + array([1, 1, 1, 0, 0, 0]) + """ + model = MeanShift( + bandwidth=bandwidth, + seeds=seeds, + min_bin_freq=min_bin_freq, + bin_seeding=bin_seeding, + cluster_all=cluster_all, + n_jobs=n_jobs, + max_iter=max_iter, + ).fit(X) + return model.cluster_centers_, model.labels_ + + +def get_bin_seeds(X, bin_size, min_bin_freq=1): + """Find seeds for mean_shift. + + Finds seeds by first binning data onto a grid whose lines are + spaced bin_size apart, and then choosing those bins with at least + min_bin_freq points. + + Parameters + ---------- + + X : array-like of shape (n_samples, n_features) + Input points, the same points that will be used in mean_shift. + + bin_size : float + Controls the coarseness of the binning. Smaller values lead + to more seeding (which is computationally more expensive). If you're + not sure how to set this, set it to the value of the bandwidth used + in clustering.mean_shift. + + min_bin_freq : int, default=1 + Only bins with at least min_bin_freq will be selected as seeds. + Raising this value decreases the number of seeds found, which + makes mean_shift computationally cheaper. + + Returns + ------- + bin_seeds : array-like of shape (n_samples, n_features) + Points used as initial kernel positions in clustering.mean_shift. + """ + if bin_size == 0: + return X + + # Bin points + bin_sizes = defaultdict(int) + for point in X: + binned_point = np.round(point / bin_size) + bin_sizes[tuple(binned_point)] += 1 + + # Select only those bins as seeds which have enough members + bin_seeds = np.array( + [point for point, freq in bin_sizes.items() if freq >= min_bin_freq], + dtype=np.float32, + ) + if len(bin_seeds) == len(X): + warnings.warn( + "Binning data failed with provided bin_size=%f, using data points as seeds." + % bin_size + ) + return X + bin_seeds = bin_seeds * bin_size + return bin_seeds + + +class MeanShift(ClusterMixin, BaseEstimator): + """Mean shift clustering using a flat kernel. + + Mean shift clustering aims to discover "blobs" in a smooth density of + samples. It is a centroid-based algorithm, which works by updating + candidates for centroids to be the mean of the points within a given + region. These candidates are then filtered in a post-processing stage to + eliminate near-duplicates to form the final set of centroids. + + Seeding is performed using a binning technique for scalability. + + Read more in the :ref:`User Guide `. 
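A condensed sketch of the grid binning performed by ``get_bin_seeds`` above (assumes ``bin_size > 0``; the helper name is illustrative):

import numpy as np
from collections import defaultdict

def rough_bin_seeds(X, bin_size, min_bin_freq=1):
    # snap every point to the nearest grid node spaced bin_size apart and
    # keep the nodes attracting at least min_bin_freq points as seeds
    counts = defaultdict(int)
    for point in X:
        counts[tuple(np.round(point / bin_size))] += 1
    kept = [node for node, freq in counts.items() if freq >= min_bin_freq]
    return np.array(kept) * bin_size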
+ + Parameters + ---------- + bandwidth : float, default=None + Bandwidth used in the flat kernel. + + If not given, the bandwidth is estimated using + sklearn.cluster.estimate_bandwidth; see the documentation for that + function for hints on scalability (see also the Notes, below). + + seeds : array-like of shape (n_samples, n_features), default=None + Seeds used to initialize kernels. If not set, + the seeds are calculated by clustering.get_bin_seeds + with bandwidth as the grid size and default values for + other parameters. + + bin_seeding : bool, default=False + If true, initial kernel locations are not locations of all + points, but rather the location of the discretized version of + points, where points are binned onto a grid whose coarseness + corresponds to the bandwidth. Setting this option to True will speed + up the algorithm because fewer seeds will be initialized. + The default value is False. + Ignored if seeds argument is not None. + + min_bin_freq : int, default=1 + To speed up the algorithm, accept only those bins with at least + min_bin_freq points as seeds. + + cluster_all : bool, default=True + If true, then all points are clustered, even those orphans that are + not within any kernel. Orphans are assigned to the nearest kernel. + If false, then orphans are given cluster label -1. + + n_jobs : int, default=None + The number of jobs to use for the computation. The following tasks benefit + from the parallelization: + + - The search of nearest neighbors for bandwidth estimation and label + assignments. See the details in the docstring of the + ``NearestNeighbors`` class. + - Hill-climbing optimization for all seeds. + + See :term:`Glossary ` for more details. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + max_iter : int, default=300 + Maximum number of iterations, per seed point before the clustering + operation terminates (for that seed point), if has not converged yet. + + .. versionadded:: 0.22 + + Attributes + ---------- + cluster_centers_ : ndarray of shape (n_clusters, n_features) + Coordinates of cluster centers. + + labels_ : ndarray of shape (n_samples,) + Labels of each point. + + n_iter_ : int + Maximum number of iterations performed on each seed. + + .. versionadded:: 0.22 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + KMeans : K-Means clustering. + + Notes + ----- + + Scalability: + + Because this implementation uses a flat kernel and + a Ball Tree to look up members of each kernel, the complexity will tend + towards O(T*n*log(n)) in lower dimensions, with n the number of samples + and T the number of points. In higher dimensions the complexity will + tend towards O(T*n^2). + + Scalability can be boosted by using fewer seeds, for example by using + a higher value of min_bin_freq in the get_bin_seeds function. + + Note that the estimate_bandwidth function is much less scalable than the + mean shift algorithm and will be the bottleneck if it is used. + + References + ---------- + + Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward + feature space analysis". IEEE Transactions on Pattern Analysis and + Machine Intelligence. 2002. pp. 603-619. 
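Following the scalability notes above, one workable pattern (a sketch with synthetic blobs; all parameter values are illustrative) is to estimate the bandwidth on a subsample and enable bin seeding:

from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=5000, centers=4, random_state=0)
# keep the quadratic-cost bandwidth estimation cheap by subsampling,
# then reuse the estimate for the full fit with binned seeds
bw = estimate_bandwidth(X, quantile=0.2, n_samples=500, random_state=0)
ms = MeanShift(bandwidth=bw, bin_seeding=True).fit(X)
ms.cluster_centers_.shape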
+ + Examples + -------- + >>> from sklearn.cluster import MeanShift + >>> import numpy as np + >>> X = np.array([[1, 1], [2, 1], [1, 0], + ... [4, 7], [3, 5], [3, 6]]) + >>> clustering = MeanShift(bandwidth=2).fit(X) + >>> clustering.labels_ + array([1, 1, 1, 0, 0, 0]) + >>> clustering.predict([[0, 0], [5, 5]]) + array([1, 0]) + >>> clustering + MeanShift(bandwidth=2) + """ + + _parameter_constraints: dict = { + "bandwidth": [Interval(Real, 0, None, closed="neither"), None], + "seeds": ["array-like", None], + "bin_seeding": ["boolean"], + "min_bin_freq": [Interval(Integral, 1, None, closed="left")], + "cluster_all": ["boolean"], + "n_jobs": [Integral, None], + "max_iter": [Interval(Integral, 0, None, closed="left")], + } + + def __init__( + self, + *, + bandwidth=None, + seeds=None, + bin_seeding=False, + min_bin_freq=1, + cluster_all=True, + n_jobs=None, + max_iter=300, + ): + self.bandwidth = bandwidth + self.seeds = seeds + self.bin_seeding = bin_seeding + self.cluster_all = cluster_all + self.min_bin_freq = min_bin_freq + self.n_jobs = n_jobs + self.max_iter = max_iter + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Perform clustering. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Samples to cluster. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Fitted instance. + """ + X = self._validate_data(X) + bandwidth = self.bandwidth + if bandwidth is None: + bandwidth = estimate_bandwidth(X, n_jobs=self.n_jobs) + + seeds = self.seeds + if seeds is None: + if self.bin_seeding: + seeds = get_bin_seeds(X, bandwidth, self.min_bin_freq) + else: + seeds = X + n_samples, n_features = X.shape + center_intensity_dict = {} + + # We use n_jobs=1 because this will be used in nested calls under + # parallel calls to _mean_shift_single_seed so there is no need for + # for further parallelism. + nbrs = NearestNeighbors(radius=bandwidth, n_jobs=1).fit(X) + + # execute iterations on all seeds in parallel + all_res = Parallel(n_jobs=self.n_jobs)( + delayed(_mean_shift_single_seed)(seed, X, nbrs, self.max_iter) + for seed in seeds + ) + # copy results in a dictionary + for i in range(len(seeds)): + if all_res[i][1]: # i.e. len(points_within) > 0 + center_intensity_dict[all_res[i][0]] = all_res[i][1] + + self.n_iter_ = max([x[2] for x in all_res]) + + if not center_intensity_dict: + # nothing near seeds + raise ValueError( + "No point was within bandwidth=%f of any seed. Try a different seeding" + " strategy or increase the bandwidth." + % bandwidth + ) + + # POST PROCESSING: remove near duplicate points + # If the distance between two kernels is less than the bandwidth, + # then we have to remove one because it is a duplicate. Remove the + # one with fewer points. 
+ + sorted_by_intensity = sorted( + center_intensity_dict.items(), + key=lambda tup: (tup[1], tup[0]), + reverse=True, + ) + sorted_centers = np.array([tup[0] for tup in sorted_by_intensity]) + unique = np.ones(len(sorted_centers), dtype=bool) + nbrs = NearestNeighbors(radius=bandwidth, n_jobs=self.n_jobs).fit( + sorted_centers + ) + for i, center in enumerate(sorted_centers): + if unique[i]: + neighbor_idxs = nbrs.radius_neighbors([center], return_distance=False)[ + 0 + ] + unique[neighbor_idxs] = 0 + unique[i] = 1 # leave the current point as unique + cluster_centers = sorted_centers[unique] + + # ASSIGN LABELS: a point belongs to the cluster that it is closest to + nbrs = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs).fit(cluster_centers) + labels = np.zeros(n_samples, dtype=int) + distances, idxs = nbrs.kneighbors(X) + if self.cluster_all: + labels = idxs.flatten() + else: + labels.fill(-1) + bool_selector = distances.flatten() <= bandwidth + labels[bool_selector] = idxs.flatten()[bool_selector] + + self.cluster_centers_, self.labels_ = cluster_centers, labels + return self + + def predict(self, X): + """Predict the closest cluster each sample in X belongs to. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + New data to predict. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Index of the cluster each sample belongs to. + """ + check_is_fitted(self) + X = self._validate_data(X, reset=False) + with config_context(assume_finite=True): + return pairwise_distances_argmin(X, self.cluster_centers_) diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_optics.py b/venv/lib/python3.10/site-packages/sklearn/cluster/_optics.py new file mode 100644 index 0000000000000000000000000000000000000000..493b7f40389cb410ae3f4f456c783440330c438f --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/_optics.py @@ -0,0 +1,1199 @@ +"""Ordering Points To Identify the Clustering Structure (OPTICS) + +These routines execute the OPTICS algorithm, and implement various +cluster extraction methods of the ordered list. + +Authors: Shane Grigsby + Adrin Jalali + Erich Schubert + Hanmin Qin +License: BSD 3 clause +""" + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy.sparse import SparseEfficiencyWarning, issparse + +from ..base import BaseEstimator, ClusterMixin, _fit_context +from ..exceptions import DataConversionWarning +from ..metrics import pairwise_distances +from ..metrics.pairwise import _VALID_METRICS, PAIRWISE_BOOLEAN_FUNCTIONS +from ..neighbors import NearestNeighbors +from ..utils import gen_batches, get_chunk_n_rows +from ..utils._param_validation import ( + HasMethods, + Interval, + RealNotInt, + StrOptions, + validate_params, +) +from ..utils.validation import check_memory + + +class OPTICS(ClusterMixin, BaseEstimator): + """Estimate clustering structure from vector array. + + OPTICS (Ordering Points To Identify the Clustering Structure), closely + related to DBSCAN, finds core sample of high density and expands clusters + from them [1]_. Unlike DBSCAN, keeps cluster hierarchy for a variable + neighborhood radius. Better suited for usage on large datasets than the + current sklearn implementation of DBSCAN. + + Clusters are then extracted using a DBSCAN-like method + (cluster_method = 'dbscan') or an automatic + technique proposed in [1]_ (cluster_method = 'xi'). 
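One consequence of keeping the ordering and reachability rather than a single flat labelling is that several DBSCAN-style cuts can be taken from one fit. A sketch with synthetic blobs (the eps values are illustrative):

from sklearn.cluster import OPTICS, cluster_optics_dbscan
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=300, centers=3, random_state=0)
clust = OPTICS(min_samples=5).fit(X)
# re-cut the same reachability graph at two different eps values
for eps in (0.5, 2.0):
    labels = cluster_optics_dbscan(
        reachability=clust.reachability_,
        core_distances=clust.core_distances_,
        ordering=clust.ordering_,
        eps=eps,
    )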
+ + This implementation deviates from the original OPTICS by first performing + k-nearest-neighborhood searches on all points to identify core sizes, then + computing only the distances to unprocessed points when constructing the + cluster order. Note that we do not employ a heap to manage the expansion + candidates, so the time complexity will be O(n^2). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + min_samples : int > 1 or float between 0 and 1, default=5 + The number of samples in a neighborhood for a point to be considered as + a core point. Also, up and down steep regions can't have more than + ``min_samples`` consecutive non-steep points. Expressed as an absolute + number or a fraction of the number of samples (rounded to be at least + 2). + + max_eps : float, default=np.inf + The maximum distance between two samples for one to be considered as + in the neighborhood of the other. Default value of ``np.inf`` will + identify clusters across all scales; reducing ``max_eps`` will result + in shorter run times. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Any metric from scikit-learn + or scipy.spatial.distance can be used. + + If metric is a callable function, it is called on each + pair of instances (rows) and the resulting value recorded. The callable + should take two arrays as input and return one value indicating the + distance between them. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. If metric is + "precomputed", `X` is assumed to be a distance matrix and must be + square. + + Valid values for metric are: + + - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', + 'manhattan'] + + - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', + 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', + 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao', + 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', + 'yule'] + + Sparse matrices are only supported by scikit-learn metrics. + See the documentation for scipy.spatial.distance for details on these + metrics. + + .. note:: + `'kulsinski'` is deprecated from SciPy 1.9 and will removed in SciPy 1.11. + + p : float, default=2 + Parameter for the Minkowski metric from + :class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + cluster_method : str, default='xi' + The extraction method used to extract clusters using the calculated + reachability and ordering. Possible values are "xi" and "dbscan". + + eps : float, default=None + The maximum distance between two samples for one to be considered as + in the neighborhood of the other. By default it assumes the same value + as ``max_eps``. + Used only when ``cluster_method='dbscan'``. + + xi : float between 0 and 1, default=0.05 + Determines the minimum steepness on the reachability plot that + constitutes a cluster boundary. For example, an upwards point in the + reachability plot is defined by the ratio from one point to its + successor being at most 1-xi. + Used only when ``cluster_method='xi'``. + + predecessor_correction : bool, default=True + Correct clusters according to the predecessors calculated by OPTICS + [2]_. This parameter has minimal effect on most datasets. 
+ Used only when ``cluster_method='xi'``. + + min_cluster_size : int > 1 or float between 0 and 1, default=None + Minimum number of samples in an OPTICS cluster, expressed as an + absolute number or a fraction of the number of samples (rounded to be + at least 2). If ``None``, the value of ``min_samples`` is used instead. + Used only when ``cluster_method='xi'``. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`~sklearn.neighbors.BallTree`. + - 'kd_tree' will use :class:`~sklearn.neighbors.KDTree`. + - 'brute' will use a brute-force search. + - 'auto' (default) will attempt to decide the most appropriate + algorithm based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to :class:`~sklearn.neighbors.BallTree` or + :class:`~sklearn.neighbors.KDTree`. This can affect the speed of the + construction and query, as well as the memory required to store the + tree. The optimal value depends on the nature of the problem. + + memory : str or object with the joblib.Memory interface, default=None + Used to cache the output of the computation of the tree. + By default, no caching is done. If a string is given, it is the + path to the caching directory. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + labels_ : ndarray of shape (n_samples,) + Cluster labels for each point in the dataset given to fit(). + Noisy samples and points which are not included in a leaf cluster + of ``cluster_hierarchy_`` are labeled as -1. + + reachability_ : ndarray of shape (n_samples,) + Reachability distances per sample, indexed by object order. Use + ``clust.reachability_[clust.ordering_]`` to access in cluster order. + + ordering_ : ndarray of shape (n_samples,) + The cluster ordered list of sample indices. + + core_distances_ : ndarray of shape (n_samples,) + Distance at which each sample becomes a core point, indexed by object + order. Points which will never be core have a distance of inf. Use + ``clust.core_distances_[clust.ordering_]`` to access in cluster order. + + predecessor_ : ndarray of shape (n_samples,) + Point that a sample was reached from, indexed by object order. + Seed points have a predecessor of -1. + + cluster_hierarchy_ : ndarray of shape (n_clusters, 2) + The list of clusters in the form of ``[start, end]`` in each row, with + all indices inclusive. The clusters are ordered according to + ``(end, -start)`` (ascending) so that larger clusters encompassing + smaller clusters come after those smaller ones. Since ``labels_`` does + not reflect the hierarchy, usually + ``len(cluster_hierarchy_) > np.unique(optics.labels_)``. Please also + note that these indices are of the ``ordering_``, i.e. + ``X[ordering_][start:end + 1]`` form a cluster. + Only available when ``cluster_method='xi'``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. 
versionadded:: 1.0 + + See Also + -------- + DBSCAN : A similar clustering for a specified neighborhood radius (eps). + Our implementation is optimized for runtime. + + References + ---------- + .. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel, + and Jörg Sander. "OPTICS: ordering points to identify the clustering + structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60. + + .. [2] Schubert, Erich, Michael Gertz. + "Improving the Cluster Structure Extracted from OPTICS Plots." Proc. of + the Conference "Lernen, Wissen, Daten, Analysen" (LWDA) (2018): 318-329. + + Examples + -------- + >>> from sklearn.cluster import OPTICS + >>> import numpy as np + >>> X = np.array([[1, 2], [2, 5], [3, 6], + ... [8, 7], [8, 8], [7, 3]]) + >>> clustering = OPTICS(min_samples=2).fit(X) + >>> clustering.labels_ + array([0, 0, 0, 1, 1, 1]) + + For a more detailed example see + :ref:`sphx_glr_auto_examples_cluster_plot_optics.py`. + """ + + _parameter_constraints: dict = { + "min_samples": [ + Interval(Integral, 2, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="both"), + ], + "max_eps": [Interval(Real, 0, None, closed="both")], + "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable], + "p": [Interval(Real, 1, None, closed="left")], + "metric_params": [dict, None], + "cluster_method": [StrOptions({"dbscan", "xi"})], + "eps": [Interval(Real, 0, None, closed="both"), None], + "xi": [Interval(Real, 0, 1, closed="both")], + "predecessor_correction": ["boolean"], + "min_cluster_size": [ + Interval(Integral, 2, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="right"), + None, + ], + "algorithm": [StrOptions({"auto", "brute", "ball_tree", "kd_tree"})], + "leaf_size": [Interval(Integral, 1, None, closed="left")], + "memory": [str, HasMethods("cache"), None], + "n_jobs": [Integral, None], + } + + def __init__( + self, + *, + min_samples=5, + max_eps=np.inf, + metric="minkowski", + p=2, + metric_params=None, + cluster_method="xi", + eps=None, + xi=0.05, + predecessor_correction=True, + min_cluster_size=None, + algorithm="auto", + leaf_size=30, + memory=None, + n_jobs=None, + ): + self.max_eps = max_eps + self.min_samples = min_samples + self.min_cluster_size = min_cluster_size + self.algorithm = algorithm + self.metric = metric + self.metric_params = metric_params + self.p = p + self.leaf_size = leaf_size + self.cluster_method = cluster_method + self.eps = eps + self.xi = xi + self.predecessor_correction = predecessor_correction + self.memory = memory + self.n_jobs = n_jobs + + @_fit_context( + # Optics.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Perform OPTICS clustering. + + Extracts an ordered list of points and reachability distances, and + performs initial clustering using ``max_eps`` distance specified at + OPTICS object instantiation. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features), or \ + (n_samples, n_samples) if metric='precomputed' + A feature array, or array of distances between samples if + metric='precomputed'. If a sparse matrix is provided, it will be + converted into CSR format. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns a fitted instance of self. 
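As noted in the parameter description above, ``fit`` also accepts a square distance matrix when ``metric='precomputed'``. A small sketch reusing the data from the class example:

import numpy as np
from sklearn.cluster import OPTICS
from sklearn.metrics import pairwise_distances

X = np.array([[1, 2], [2, 5], [3, 6], [8, 7], [8, 8], [7, 3]])
D = pairwise_distances(X)  # (n_samples, n_samples) distance matrix
OPTICS(min_samples=2, metric="precomputed").fit(D).labels_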
+ """ + dtype = bool if self.metric in PAIRWISE_BOOLEAN_FUNCTIONS else float + if dtype == bool and X.dtype != bool: + msg = ( + "Data will be converted to boolean for" + f" metric {self.metric}, to avoid this warning," + " you may convert the data prior to calling fit." + ) + warnings.warn(msg, DataConversionWarning) + + X = self._validate_data(X, dtype=dtype, accept_sparse="csr") + if self.metric == "precomputed" and issparse(X): + with warnings.catch_warnings(): + warnings.simplefilter("ignore", SparseEfficiencyWarning) + # Set each diagonal to an explicit value so each point is its + # own neighbor + X.setdiag(X.diagonal()) + memory = check_memory(self.memory) + + ( + self.ordering_, + self.core_distances_, + self.reachability_, + self.predecessor_, + ) = memory.cache(compute_optics_graph)( + X=X, + min_samples=self.min_samples, + algorithm=self.algorithm, + leaf_size=self.leaf_size, + metric=self.metric, + metric_params=self.metric_params, + p=self.p, + n_jobs=self.n_jobs, + max_eps=self.max_eps, + ) + + # Extract clusters from the calculated orders and reachability + if self.cluster_method == "xi": + labels_, clusters_ = cluster_optics_xi( + reachability=self.reachability_, + predecessor=self.predecessor_, + ordering=self.ordering_, + min_samples=self.min_samples, + min_cluster_size=self.min_cluster_size, + xi=self.xi, + predecessor_correction=self.predecessor_correction, + ) + self.cluster_hierarchy_ = clusters_ + elif self.cluster_method == "dbscan": + if self.eps is None: + eps = self.max_eps + else: + eps = self.eps + + if eps > self.max_eps: + raise ValueError( + "Specify an epsilon smaller than %s. Got %s." % (self.max_eps, eps) + ) + + labels_ = cluster_optics_dbscan( + reachability=self.reachability_, + core_distances=self.core_distances_, + ordering=self.ordering_, + eps=eps, + ) + + self.labels_ = labels_ + return self + + +def _validate_size(size, n_samples, param_name): + if size > n_samples: + raise ValueError( + "%s must be no greater than the number of samples (%d). Got %d" + % (param_name, n_samples, size) + ) + + +# OPTICS helper functions +def _compute_core_distances_(X, neighbors, min_samples, working_memory): + """Compute the k-th nearest neighbor of each sample. + + Equivalent to neighbors.kneighbors(X, self.min_samples)[0][:, -1] + but with more memory efficiency. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data. + neighbors : NearestNeighbors instance + The fitted nearest neighbors estimator. + working_memory : int, default=None + The sought maximum memory for temporary distance matrix chunks. + When None (default), the value of + ``sklearn.get_config()['working_memory']`` is used. + + Returns + ------- + core_distances : ndarray of shape (n_samples,) + Distance at which each sample becomes a core point. + Points which will never be core have a distance of inf. 
+ """ + n_samples = X.shape[0] + core_distances = np.empty(n_samples) + core_distances.fill(np.nan) + + chunk_n_rows = get_chunk_n_rows( + row_bytes=16 * min_samples, max_n_rows=n_samples, working_memory=working_memory + ) + slices = gen_batches(n_samples, chunk_n_rows) + for sl in slices: + core_distances[sl] = neighbors.kneighbors(X[sl], min_samples)[0][:, -1] + return core_distances + + +@validate_params( + { + "X": [np.ndarray, "sparse matrix"], + "min_samples": [ + Interval(Integral, 2, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="both"), + ], + "max_eps": [Interval(Real, 0, None, closed="both")], + "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable], + "p": [Interval(Real, 0, None, closed="right"), None], + "metric_params": [dict, None], + "algorithm": [StrOptions({"auto", "brute", "ball_tree", "kd_tree"})], + "leaf_size": [Interval(Integral, 1, None, closed="left")], + "n_jobs": [Integral, None], + }, + prefer_skip_nested_validation=False, # metric is not validated yet +) +def compute_optics_graph( + X, *, min_samples, max_eps, metric, p, metric_params, algorithm, leaf_size, n_jobs +): + """Compute the OPTICS reachability graph. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features), or \ + (n_samples, n_samples) if metric='precomputed' + A feature array, or array of distances between samples if + metric='precomputed'. + + min_samples : int > 1 or float between 0 and 1 + The number of samples in a neighborhood for a point to be considered + as a core point. Expressed as an absolute number or a fraction of the + number of samples (rounded to be at least 2). + + max_eps : float, default=np.inf + The maximum distance between two samples for one to be considered as + in the neighborhood of the other. Default value of ``np.inf`` will + identify clusters across all scales; reducing ``max_eps`` will result + in shorter run times. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Any metric from scikit-learn + or scipy.spatial.distance can be used. + + If metric is a callable function, it is called on each + pair of instances (rows) and the resulting value recorded. The callable + should take two arrays as input and return one value indicating the + distance between them. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. If metric is + "precomputed", X is assumed to be a distance matrix and must be square. + + Valid values for metric are: + + - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', + 'manhattan'] + + - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', + 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', + 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao', + 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', + 'yule'] + + See the documentation for scipy.spatial.distance for details on these + metrics. + + .. note:: + `'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11. + + p : float, default=2 + Parameter for the Minkowski metric from + :class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. 
+ + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`~sklearn.neighbors.BallTree`. + - 'kd_tree' will use :class:`~sklearn.neighbors.KDTree`. + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to `fit` method. (default) + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to :class:`~sklearn.neighbors.BallTree` or + :class:`~sklearn.neighbors.KDTree`. This can affect the speed of the + construction and query, as well as the memory required to store the + tree. The optimal value depends on the nature of the problem. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Returns + ------- + ordering_ : array of shape (n_samples,) + The cluster ordered list of sample indices. + + core_distances_ : array of shape (n_samples,) + Distance at which each sample becomes a core point, indexed by object + order. Points which will never be core have a distance of inf. Use + ``clust.core_distances_[clust.ordering_]`` to access in cluster order. + + reachability_ : array of shape (n_samples,) + Reachability distances per sample, indexed by object order. Use + ``clust.reachability_[clust.ordering_]`` to access in cluster order. + + predecessor_ : array of shape (n_samples,) + Point that a sample was reached from, indexed by object order. + Seed points have a predecessor of -1. + + References + ---------- + .. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel, + and Jörg Sander. "OPTICS: ordering points to identify the clustering + structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.cluster import compute_optics_graph + >>> X = np.array([[1, 2], [2, 5], [3, 6], + ... [8, 7], [8, 8], [7, 3]]) + >>> ordering, core_distances, reachability, predecessor = compute_optics_graph( + ... X, + ... min_samples=2, + ... max_eps=np.inf, + ... metric="minkowski", + ... p=2, + ... metric_params=None, + ... algorithm="auto", + ... leaf_size=30, + ... n_jobs=None, + ... ) + >>> ordering + array([0, 1, 2, 5, 3, 4]) + >>> core_distances + array([3.16..., 1.41..., 1.41..., 1. , 1. , + 4.12...]) + >>> reachability + array([ inf, 3.16..., 1.41..., 4.12..., 1. , + 5. ]) + >>> predecessor + array([-1, 0, 1, 5, 3, 2]) + """ + n_samples = X.shape[0] + _validate_size(min_samples, n_samples, "min_samples") + if min_samples <= 1: + min_samples = max(2, int(min_samples * n_samples)) + + # Start all points as 'unprocessed' ## + reachability_ = np.empty(n_samples) + reachability_.fill(np.inf) + predecessor_ = np.empty(n_samples, dtype=int) + predecessor_.fill(-1) + + nbrs = NearestNeighbors( + n_neighbors=min_samples, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + metric_params=metric_params, + p=p, + n_jobs=n_jobs, + ) + + nbrs.fit(X) + # Here we first do a kNN query for each point, this differs from + # the original OPTICS that only used epsilon range queries. + # TODO: handle working_memory somehow? 
+ core_distances_ = _compute_core_distances_( + X=X, neighbors=nbrs, min_samples=min_samples, working_memory=None + ) + # OPTICS puts an upper limit on these, use inf for undefined. + core_distances_[core_distances_ > max_eps] = np.inf + np.around( + core_distances_, + decimals=np.finfo(core_distances_.dtype).precision, + out=core_distances_, + ) + + # Main OPTICS loop. Not parallelizable. The order that entries are + # written to the 'ordering_' list is important! + # Note that this implementation is O(n^2) theoretically, but + # supposedly with very low constant factors. + processed = np.zeros(X.shape[0], dtype=bool) + ordering = np.zeros(X.shape[0], dtype=int) + for ordering_idx in range(X.shape[0]): + # Choose next based on smallest reachability distance + # (And prefer smaller ids on ties, possibly np.inf!) + index = np.where(processed == 0)[0] + point = index[np.argmin(reachability_[index])] + + processed[point] = True + ordering[ordering_idx] = point + if core_distances_[point] != np.inf: + _set_reach_dist( + core_distances_=core_distances_, + reachability_=reachability_, + predecessor_=predecessor_, + point_index=point, + processed=processed, + X=X, + nbrs=nbrs, + metric=metric, + metric_params=metric_params, + p=p, + max_eps=max_eps, + ) + if np.all(np.isinf(reachability_)): + warnings.warn( + ( + "All reachability values are inf. Set a larger" + " max_eps or all data will be considered outliers." + ), + UserWarning, + ) + return ordering, core_distances_, reachability_, predecessor_ + + +def _set_reach_dist( + core_distances_, + reachability_, + predecessor_, + point_index, + processed, + X, + nbrs, + metric, + metric_params, + p, + max_eps, +): + P = X[point_index : point_index + 1] + # Assume that radius_neighbors is faster without distances + # and we don't need all distances, nevertheless, this means + # we may be doing some work twice. + indices = nbrs.radius_neighbors(P, radius=max_eps, return_distance=False)[0] + + # Getting indices of neighbors that have not been processed + unproc = np.compress(~np.take(processed, indices), indices) + # Neighbors of current point are already processed. + if not unproc.size: + return + + # Only compute distances to unprocessed neighbors: + if metric == "precomputed": + dists = X[[point_index], unproc] + if isinstance(dists, np.matrix): + dists = np.asarray(dists) + dists = dists.ravel() + else: + _params = dict() if metric_params is None else metric_params.copy() + if metric == "minkowski" and "p" not in _params: + # the same logic as neighbors, p is ignored if explicitly set + # in the dict params + _params["p"] = p + dists = pairwise_distances(P, X[unproc], metric, n_jobs=None, **_params).ravel() + + rdists = np.maximum(dists, core_distances_[point_index]) + np.around(rdists, decimals=np.finfo(rdists.dtype).precision, out=rdists) + improved = np.where(rdists < np.take(reachability_, unproc)) + reachability_[unproc[improved]] = rdists[improved] + predecessor_[unproc[improved]] = point_index + + +@validate_params( + { + "reachability": [np.ndarray], + "core_distances": [np.ndarray], + "ordering": [np.ndarray], + "eps": [Interval(Real, 0, None, closed="both")], + }, + prefer_skip_nested_validation=True, +) +def cluster_optics_dbscan(*, reachability, core_distances, ordering, eps): + """Perform DBSCAN extraction for an arbitrary epsilon. + + Extracting the clusters runs in linear time. 
Note that this results in + ``labels_`` which are close to a :class:`~sklearn.cluster.DBSCAN` with + similar settings and ``eps``, only if ``eps`` is close to ``max_eps``. + + Parameters + ---------- + reachability : ndarray of shape (n_samples,) + Reachability distances calculated by OPTICS (``reachability_``). + + core_distances : ndarray of shape (n_samples,) + Distances at which points become core (``core_distances_``). + + ordering : ndarray of shape (n_samples,) + OPTICS ordered point indices (``ordering_``). + + eps : float + DBSCAN ``eps`` parameter. Must be set to < ``max_eps``. Results + will be close to DBSCAN algorithm if ``eps`` and ``max_eps`` are close + to one another. + + Returns + ------- + labels_ : array of shape (n_samples,) + The estimated labels. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.cluster import cluster_optics_dbscan, compute_optics_graph + >>> X = np.array([[1, 2], [2, 5], [3, 6], + ... [8, 7], [8, 8], [7, 3]]) + >>> ordering, core_distances, reachability, predecessor = compute_optics_graph( + ... X, + ... min_samples=2, + ... max_eps=np.inf, + ... metric="minkowski", + ... p=2, + ... metric_params=None, + ... algorithm="auto", + ... leaf_size=30, + ... n_jobs=None, + ... ) + >>> eps = 4.5 + >>> labels = cluster_optics_dbscan( + ... reachability=reachability, + ... core_distances=core_distances, + ... ordering=ordering, + ... eps=eps, + ... ) + >>> labels + array([0, 0, 0, 1, 1, 1]) + """ + n_samples = len(core_distances) + labels = np.zeros(n_samples, dtype=int) + + far_reach = reachability > eps + near_core = core_distances <= eps + labels[ordering] = np.cumsum(far_reach[ordering] & near_core[ordering]) - 1 + labels[far_reach & ~near_core] = -1 + return labels + + +@validate_params( + { + "reachability": [np.ndarray], + "predecessor": [np.ndarray], + "ordering": [np.ndarray], + "min_samples": [ + Interval(Integral, 2, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="both"), + ], + "min_cluster_size": [ + Interval(Integral, 2, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="both"), + None, + ], + "xi": [Interval(Real, 0, 1, closed="both")], + "predecessor_correction": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def cluster_optics_xi( + *, + reachability, + predecessor, + ordering, + min_samples, + min_cluster_size=None, + xi=0.05, + predecessor_correction=True, +): + """Automatically extract clusters according to the Xi-steep method. + + Parameters + ---------- + reachability : ndarray of shape (n_samples,) + Reachability distances calculated by OPTICS (`reachability_`). + + predecessor : ndarray of shape (n_samples,) + Predecessors calculated by OPTICS. + + ordering : ndarray of shape (n_samples,) + OPTICS ordered point indices (`ordering_`). + + min_samples : int > 1 or float between 0 and 1 + The same as the min_samples given to OPTICS. Up and down steep regions + can't have more then ``min_samples`` consecutive non-steep points. + Expressed as an absolute number or a fraction of the number of samples + (rounded to be at least 2). + + min_cluster_size : int > 1 or float between 0 and 1, default=None + Minimum number of samples in an OPTICS cluster, expressed as an + absolute number or a fraction of the number of samples (rounded to be + at least 2). If ``None``, the value of ``min_samples`` is used instead. + + xi : float between 0 and 1, default=0.05 + Determines the minimum steepness on the reachability plot that + constitutes a cluster boundary. 
For example, an upwards point in the + reachability plot is defined by the ratio from one point to its + successor being at most 1-xi. + + predecessor_correction : bool, default=True + Correct clusters based on the calculated predecessors. + + Returns + ------- + labels : ndarray of shape (n_samples,) + The labels assigned to samples. Points which are not included + in any cluster are labeled as -1. + + clusters : ndarray of shape (n_clusters, 2) + The list of clusters in the form of ``[start, end]`` in each row, with + all indices inclusive. The clusters are ordered according to ``(end, + -start)`` (ascending) so that larger clusters encompassing smaller + clusters come after such nested smaller clusters. Since ``labels`` does + not reflect the hierarchy, usually ``len(clusters) > + np.unique(labels)``. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.cluster import cluster_optics_xi, compute_optics_graph + >>> X = np.array([[1, 2], [2, 5], [3, 6], + ... [8, 7], [8, 8], [7, 3]]) + >>> ordering, core_distances, reachability, predecessor = compute_optics_graph( + ... X, + ... min_samples=2, + ... max_eps=np.inf, + ... metric="minkowski", + ... p=2, + ... metric_params=None, + ... algorithm="auto", + ... leaf_size=30, + ... n_jobs=None + ... ) + >>> min_samples = 2 + >>> labels, clusters = cluster_optics_xi( + ... reachability=reachability, + ... predecessor=predecessor, + ... ordering=ordering, + ... min_samples=min_samples, + ... ) + >>> labels + array([0, 0, 0, 1, 1, 1]) + >>> clusters + array([[0, 2], + [3, 5], + [0, 5]]) + """ + n_samples = len(reachability) + _validate_size(min_samples, n_samples, "min_samples") + if min_samples <= 1: + min_samples = max(2, int(min_samples * n_samples)) + if min_cluster_size is None: + min_cluster_size = min_samples + _validate_size(min_cluster_size, n_samples, "min_cluster_size") + if min_cluster_size <= 1: + min_cluster_size = max(2, int(min_cluster_size * n_samples)) + + clusters = _xi_cluster( + reachability[ordering], + predecessor[ordering], + ordering, + xi, + min_samples, + min_cluster_size, + predecessor_correction, + ) + labels = _extract_xi_labels(ordering, clusters) + return labels, clusters + + +def _extend_region(steep_point, xward_point, start, min_samples): + """Extend the area until it's maximal. + + It's the same function for both upward and downward reagions, depending on + the given input parameters. Assuming: + + - steep_{upward/downward}: bool array indicating whether a point is a + steep {upward/downward}; + - upward/downward: bool array indicating whether a point is + upward/downward; + + To extend an upward reagion, ``steep_point=steep_upward`` and + ``xward_point=downward`` are expected, and to extend a downward region, + ``steep_point=steep_downward`` and ``xward_point=upward``. + + Parameters + ---------- + steep_point : ndarray of shape (n_samples,), dtype=bool + True if the point is steep downward (upward). + + xward_point : ndarray of shape (n_samples,), dtype=bool + True if the point is an upward (respectively downward) point. + + start : int + The start of the xward region. + + min_samples : int + The same as the min_samples given to OPTICS. Up and down steep + regions can't have more then ``min_samples`` consecutive non-steep + points. + + Returns + ------- + index : int + The current index iterating over all the samples, i.e. where we are up + to in our search. + + end : int + The end of the region, which can be behind the index. The region + includes the ``end`` index. 
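The boolean masks this helper expects mirror the steepness definitions used later in ``_xi_cluster``; a small sketch on a toy reachability plot (values illustrative, infinities ignored for brevity):

import numpy as np

xi = 0.05
r = np.array([1.0, 1.0, 0.2, 0.2, 5.0, 5.0])  # toy ordered reachability plot
ratio = r[:-1] / r[1:]
steep_downward = ratio >= 1 / (1 - xi)   # r[i] much larger than r[i + 1]
steep_upward = ratio <= 1 - xi           # r[i] much smaller than r[i + 1]
downward, upward = ratio > 1, ratio < 1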
+ """ + n_samples = len(steep_point) + non_xward_points = 0 + index = start + end = start + # find a maximal area + while index < n_samples: + if steep_point[index]: + non_xward_points = 0 + end = index + elif not xward_point[index]: + # it's not a steep point, but still goes up. + non_xward_points += 1 + # region should include no more than min_samples consecutive + # non steep xward points. + if non_xward_points > min_samples: + break + else: + return end + index += 1 + return end + + +def _update_filter_sdas(sdas, mib, xi_complement, reachability_plot): + """Update steep down areas (SDAs) using the new maximum in between (mib) + value, and the given complement of xi, i.e. ``1 - xi``. + """ + if np.isinf(mib): + return [] + res = [ + sda for sda in sdas if mib <= reachability_plot[sda["start"]] * xi_complement + ] + for sda in res: + sda["mib"] = max(sda["mib"], mib) + return res + + +def _correct_predecessor(reachability_plot, predecessor_plot, ordering, s, e): + """Correct for predecessors. + + Applies Algorithm 2 of [1]_. + + Input parameters are ordered by the computer OPTICS ordering. + + .. [1] Schubert, Erich, Michael Gertz. + "Improving the Cluster Structure Extracted from OPTICS Plots." Proc. of + the Conference "Lernen, Wissen, Daten, Analysen" (LWDA) (2018): 318-329. + """ + while s < e: + if reachability_plot[s] > reachability_plot[e]: + return s, e + p_e = predecessor_plot[e] + for i in range(s, e): + if p_e == ordering[i]: + return s, e + e -= 1 + return None, None + + +def _xi_cluster( + reachability_plot, + predecessor_plot, + ordering, + xi, + min_samples, + min_cluster_size, + predecessor_correction, +): + """Automatically extract clusters according to the Xi-steep method. + + This is rouphly an implementation of Figure 19 of the OPTICS paper. + + Parameters + ---------- + reachability_plot : array-like of shape (n_samples,) + The reachability plot, i.e. reachability ordered according to + the calculated ordering, all computed by OPTICS. + + predecessor_plot : array-like of shape (n_samples,) + Predecessors ordered according to the calculated ordering. + + xi : float, between 0 and 1 + Determines the minimum steepness on the reachability plot that + constitutes a cluster boundary. For example, an upwards point in the + reachability plot is defined by the ratio from one point to its + successor being at most 1-xi. + + min_samples : int > 1 + The same as the min_samples given to OPTICS. Up and down steep regions + can't have more then ``min_samples`` consecutive non-steep points. + + min_cluster_size : int > 1 + Minimum number of samples in an OPTICS cluster. + + predecessor_correction : bool + Correct clusters based on the calculated predecessors. + + Returns + ------- + clusters : ndarray of shape (n_clusters, 2) + The list of clusters in the form of [start, end] in each row, with all + indices inclusive. The clusters are ordered in a way that larger + clusters encompassing smaller clusters come after those smaller + clusters. + """ + + # Our implementation adds an inf to the end of reachability plot + # this helps to find potential clusters at the end of the + # reachability plot even if there's no upward region at the end of it. 
+ reachability_plot = np.hstack((reachability_plot, np.inf)) + + xi_complement = 1 - xi + sdas = [] # steep down areas, introduced in section 4.3.2 of the paper + clusters = [] + index = 0 + mib = 0.0 # maximum in between, section 4.3.2 + + # Our implementation corrects a mistake in the original + # paper, i.e., in Definition 9 steep downward point, + # r(p) * (1 - x1) <= r(p + 1) should be + # r(p) * (1 - x1) >= r(p + 1) + with np.errstate(invalid="ignore"): + ratio = reachability_plot[:-1] / reachability_plot[1:] + steep_upward = ratio <= xi_complement + steep_downward = ratio >= 1 / xi_complement + downward = ratio > 1 + upward = ratio < 1 + + # the following loop is almost exactly as Figure 19 of the paper. + # it jumps over the areas which are not either steep down or up areas + for steep_index in iter(np.flatnonzero(steep_upward | steep_downward)): + # just continue if steep_index has been a part of a discovered xward + # area. + if steep_index < index: + continue + + mib = max(mib, np.max(reachability_plot[index : steep_index + 1])) + + # steep downward areas + if steep_downward[steep_index]: + sdas = _update_filter_sdas(sdas, mib, xi_complement, reachability_plot) + D_start = steep_index + D_end = _extend_region(steep_downward, upward, D_start, min_samples) + D = {"start": D_start, "end": D_end, "mib": 0.0} + sdas.append(D) + index = D_end + 1 + mib = reachability_plot[index] + + # steep upward areas + else: + sdas = _update_filter_sdas(sdas, mib, xi_complement, reachability_plot) + U_start = steep_index + U_end = _extend_region(steep_upward, downward, U_start, min_samples) + index = U_end + 1 + mib = reachability_plot[index] + + U_clusters = [] + for D in sdas: + c_start = D["start"] + c_end = U_end + + # line (**), sc2* + if reachability_plot[c_end + 1] * xi_complement < D["mib"]: + continue + + # Definition 11: criterion 4 + D_max = reachability_plot[D["start"]] + if D_max * xi_complement >= reachability_plot[c_end + 1]: + # Find the first index from the left side which is almost + # at the same level as the end of the detected cluster. + while ( + reachability_plot[c_start + 1] > reachability_plot[c_end + 1] + and c_start < D["end"] + ): + c_start += 1 + elif reachability_plot[c_end + 1] * xi_complement >= D_max: + # Find the first index from the right side which is almost + # at the same level as the beginning of the detected + # cluster. + # Our implementation corrects a mistake in the original + # paper, i.e., in Definition 11 4c, r(x) < r(sD) should be + # r(x) > r(sD). + while reachability_plot[c_end - 1] > D_max and c_end > U_start: + c_end -= 1 + + # predecessor correction + if predecessor_correction: + c_start, c_end = _correct_predecessor( + reachability_plot, predecessor_plot, ordering, c_start, c_end + ) + if c_start is None: + continue + + # Definition 11: criterion 3.a + if c_end - c_start + 1 < min_cluster_size: + continue + + # Definition 11: criterion 1 + if c_start > D["end"]: + continue + + # Definition 11: criterion 2 + if c_end < U_start: + continue + + U_clusters.append((c_start, c_end)) + + # add smaller clusters first. + U_clusters.reverse() + clusters.extend(U_clusters) + + return np.array(clusters) + + +def _extract_xi_labels(ordering, clusters): + """Extracts the labels from the clusters returned by `_xi_cluster`. + We rely on the fact that clusters are stored + with the smaller clusters coming before the larger ones. 
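+    Because of that ordering, a cluster is labelled only if none of its points
+    has already been labelled by a smaller (nested) cluster processed before
+    it; points that belong solely to such a skipped enclosing cluster keep the
+    noise label ``-1``.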
+ + Parameters + ---------- + ordering : array-like of shape (n_samples,) + The ordering of points calculated by OPTICS + + clusters : array-like of shape (n_clusters, 2) + List of clusters i.e. (start, end) tuples, + as returned by `_xi_cluster`. + + Returns + ------- + labels : ndarray of shape (n_samples,) + """ + + labels = np.full(len(ordering), -1, dtype=int) + label = 0 + for c in clusters: + if not np.any(labels[c[0] : (c[1] + 1)] != -1): + labels[c[0] : (c[1] + 1)] = label + label += 1 + labels[ordering] = labels.copy() + return labels diff --git a/venv/lib/python3.10/site-packages/sklearn/cluster/_spectral.py b/venv/lib/python3.10/site-packages/sklearn/cluster/_spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..d925a2ff56bc4e260633f2076358617dacb1c4c2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/cluster/_spectral.py @@ -0,0 +1,799 @@ +"""Algorithms for spectral clustering""" + +# Author: Gael Varoquaux +# Brian Cheung +# Wei LI +# Andrew Knyazev +# License: BSD 3 clause + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy.linalg import LinAlgError, qr, svd +from scipy.sparse import csc_matrix + +from ..base import BaseEstimator, ClusterMixin, _fit_context +from ..manifold import spectral_embedding +from ..metrics.pairwise import KERNEL_PARAMS, pairwise_kernels +from ..neighbors import NearestNeighbors, kneighbors_graph +from ..utils import as_float_array, check_random_state +from ..utils._param_validation import Interval, StrOptions, validate_params +from ._kmeans import k_means + + +def cluster_qr(vectors): + """Find the discrete partition closest to the eigenvector embedding. + + This implementation was proposed in [1]_. + + .. versionadded:: 1.1 + + Parameters + ---------- + vectors : array-like, shape: (n_samples, n_clusters) + The embedding space of the samples. + + Returns + ------- + labels : array of integers, shape: n_samples + The cluster labels of vectors. + + References + ---------- + .. [1] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019 + Anil Damle, Victor Minden, Lexing Ying + <10.1093/imaiai/iay008>` + + """ + + k = vectors.shape[1] + _, _, piv = qr(vectors.T, pivoting=True) + ut, _, v = svd(vectors[piv[:k], :].T) + vectors = abs(np.dot(vectors, np.dot(ut, v.conj()))) + return vectors.argmax(axis=1) + + +def discretize( + vectors, *, copy=True, max_svd_restarts=30, n_iter_max=20, random_state=None +): + """Search for a partition matrix which is closest to the eigenvector embedding. + + This implementation was proposed in [1]_. + + Parameters + ---------- + vectors : array-like of shape (n_samples, n_clusters) + The embedding space of the samples. + + copy : bool, default=True + Whether to copy vectors, or perform in-place normalization. + + max_svd_restarts : int, default=30 + Maximum number of attempts to restart SVD if convergence fails + + n_iter_max : int, default=30 + Maximum number of iterations to attempt in rotation and partition + matrix search if machine precision convergence is not reached + + random_state : int, RandomState instance, default=None + Determines random number generation for rotation matrix initialization. + Use an int to make the randomness deterministic. + See :term:`Glossary `. + + Returns + ------- + labels : array of integers, shape: n_samples + The labels of the clusters. + + References + ---------- + + .. [1] `Multiclass spectral clustering, 2003 + Stella X. 
Yu, Jianbo Shi + `_ + + Notes + ----- + + The eigenvector embedding is used to iteratively search for the + closest discrete partition. First, the eigenvector embedding is + normalized to the space of partition matrices. An optimal discrete + partition matrix closest to this normalized embedding multiplied by + an initial rotation is calculated. Fixing this discrete partition + matrix, an optimal rotation matrix is calculated. These two + calculations are performed until convergence. The discrete partition + matrix is returned as the clustering solution. Used in spectral + clustering, this method tends to be faster and more robust to random + initialization than k-means. + + """ + + random_state = check_random_state(random_state) + + vectors = as_float_array(vectors, copy=copy) + + eps = np.finfo(float).eps + n_samples, n_components = vectors.shape + + # Normalize the eigenvectors to an equal length of a vector of ones. + # Reorient the eigenvectors to point in the negative direction with respect + # to the first element. This may have to do with constraining the + # eigenvectors to lie in a specific quadrant to make the discretization + # search easier. + norm_ones = np.sqrt(n_samples) + for i in range(vectors.shape[1]): + vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) * norm_ones + if vectors[0, i] != 0: + vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i]) + + # Normalize the rows of the eigenvectors. Samples should lie on the unit + # hypersphere centered at the origin. This transforms the samples in the + # embedding space to the space of partition matrices. + vectors = vectors / np.sqrt((vectors**2).sum(axis=1))[:, np.newaxis] + + svd_restarts = 0 + has_converged = False + + # If there is an exception we try to randomize and rerun SVD again + # do this max_svd_restarts times. 
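+    # Each attempt alternates between two steps: given the current rotation,
+    # every sample is assigned to the column of ``np.dot(vectors, rotation)``
+    # with the largest value (the discrete partition), and given that
+    # partition a new rotation is computed from the SVD of
+    # ``vectors_discrete.T * vectors``. The loop stops when the normalized-cut
+    # objective no longer changes (up to machine precision) or after
+    # ``n_iter_max`` iterations.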
+ while (svd_restarts < max_svd_restarts) and not has_converged: + # Initialize first column of rotation matrix with a row of the + # eigenvectors + rotation = np.zeros((n_components, n_components)) + rotation[:, 0] = vectors[random_state.randint(n_samples), :].T + + # To initialize the rest of the rotation matrix, find the rows + # of the eigenvectors that are as orthogonal to each other as + # possible + c = np.zeros(n_samples) + for j in range(1, n_components): + # Accumulate c to ensure row is as orthogonal as possible to + # previous picks as well as current one + c += np.abs(np.dot(vectors, rotation[:, j - 1])) + rotation[:, j] = vectors[c.argmin(), :].T + + last_objective_value = 0.0 + n_iter = 0 + + while not has_converged: + n_iter += 1 + + t_discrete = np.dot(vectors, rotation) + + labels = t_discrete.argmax(axis=1) + vectors_discrete = csc_matrix( + (np.ones(len(labels)), (np.arange(0, n_samples), labels)), + shape=(n_samples, n_components), + ) + + t_svd = vectors_discrete.T * vectors + + try: + U, S, Vh = np.linalg.svd(t_svd) + except LinAlgError: + svd_restarts += 1 + print("SVD did not converge, randomizing and trying again") + break + + ncut_value = 2.0 * (n_samples - S.sum()) + if (abs(ncut_value - last_objective_value) < eps) or (n_iter > n_iter_max): + has_converged = True + else: + # otherwise calculate rotation and continue + last_objective_value = ncut_value + rotation = np.dot(Vh.T, U.T) + + if not has_converged: + raise LinAlgError("SVD did not converge") + return labels + + +@validate_params( + {"affinity": ["array-like", "sparse matrix"]}, + prefer_skip_nested_validation=False, +) +def spectral_clustering( + affinity, + *, + n_clusters=8, + n_components=None, + eigen_solver=None, + random_state=None, + n_init=10, + eigen_tol="auto", + assign_labels="kmeans", + verbose=False, +): + """Apply clustering to a projection of the normalized Laplacian. + + In practice Spectral Clustering is very useful when the structure of + the individual clusters is highly non-convex or more generally when + a measure of the center and spread of the cluster is not a suitable + description of the complete cluster. For instance, when clusters are + nested circles on the 2D plane. + + If affinity is the adjacency matrix of a graph, this method can be + used to find normalized graph cuts [1]_, [2]_. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + affinity : {array-like, sparse matrix} of shape (n_samples, n_samples) + The affinity matrix describing the relationship of the samples to + embed. **Must be symmetric**. + + Possible examples: + - adjacency matrix of a graph, + - heat kernel of the pairwise distance matrix of the samples, + - symmetric k-nearest neighbours connectivity matrix of the samples. + + n_clusters : int, default=None + Number of clusters to extract. + + n_components : int, default=n_clusters + Number of eigenvectors to use for the spectral embedding. + + eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'} + The eigenvalue decomposition method. If None then ``'arpack'`` is used. + See [4]_ for more details regarding ``'lobpcg'``. + Eigensolver ``'amg'`` runs ``'lobpcg'`` with optional + Algebraic MultiGrid preconditioning and requires pyamg to be installed. + It can be faster on very large sparse problems [6]_ and [7]_. + + random_state : int, RandomState instance, default=None + A pseudo random number generator used for the initialization + of the lobpcg eigenvectors decomposition when `eigen_solver == + 'amg'`, and for the K-Means initialization. 
Use an int to make + the results deterministic across calls (See + :term:`Glossary `). + + .. note:: + When using `eigen_solver == 'amg'`, + it is necessary to also fix the global numpy seed with + `np.random.seed(int)` to get deterministic results. See + https://github.com/pyamg/pyamg/issues/139 for further + information. + + n_init : int, default=10 + Number of time the k-means algorithm will be run with different + centroid seeds. The final results will be the best output of n_init + consecutive runs in terms of inertia. Only used if + ``assign_labels='kmeans'``. + + eigen_tol : float, default="auto" + Stopping criterion for eigendecomposition of the Laplacian matrix. + If `eigen_tol="auto"` then the passed tolerance will depend on the + `eigen_solver`: + + - If `eigen_solver="arpack"`, then `eigen_tol=0.0`; + - If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then + `eigen_tol=None` which configures the underlying `lobpcg` solver to + automatically resolve the value according to their heuristics. See, + :func:`scipy.sparse.linalg.lobpcg` for details. + + Note that when using `eigen_solver="lobpcg"` or `eigen_solver="amg"` + values of `tol<1e-5` may lead to convergence issues and should be + avoided. + + .. versionadded:: 1.2 + Added 'auto' option. + + assign_labels : {'kmeans', 'discretize', 'cluster_qr'}, default='kmeans' + The strategy to use to assign labels in the embedding + space. There are three ways to assign labels after the Laplacian + embedding. k-means can be applied and is a popular choice. But it can + also be sensitive to initialization. Discretization is another + approach which is less sensitive to random initialization [3]_. + The cluster_qr method [5]_ directly extracts clusters from eigenvectors + in spectral clustering. In contrast to k-means and discretization, cluster_qr + has no tuning parameters and is not an iterative method, yet may outperform + k-means and discretization in terms of both quality and speed. + + .. versionchanged:: 1.1 + Added new labeling method 'cluster_qr'. + + verbose : bool, default=False + Verbosity mode. + + .. versionadded:: 0.24 + + Returns + ------- + labels : array of integers, shape: n_samples + The labels of the clusters. + + Notes + ----- + The graph should contain only one connected component, elsewhere + the results make little sense. + + This algorithm solves the normalized cut for `k=2`: it is a + normalized spectral clustering. + + References + ---------- + + .. [1] :doi:`Normalized cuts and image segmentation, 2000 + Jianbo Shi, Jitendra Malik + <10.1109/34.868688>` + + .. [2] :doi:`A Tutorial on Spectral Clustering, 2007 + Ulrike von Luxburg + <10.1007/s11222-007-9033-z>` + + .. [3] `Multiclass spectral clustering, 2003 + Stella X. Yu, Jianbo Shi + `_ + + .. [4] :doi:`Toward the Optimal Preconditioned Eigensolver: + Locally Optimal Block Preconditioned Conjugate Gradient Method, 2001 + A. V. Knyazev + SIAM Journal on Scientific Computing 23, no. 2, pp. 517-541. + <10.1137/S1064827500366124>` + + .. [5] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019 + Anil Damle, Victor Minden, Lexing Ying + <10.1093/imaiai/iay008>` + + .. [6] :doi:`Multiscale Spectral Image Segmentation Multiscale preconditioning + for computing eigenvalues of graph Laplacians in image segmentation, 2006 + Andrew Knyazev + <10.13140/RG.2.2.35280.02565>` + + .. [7] :doi:`Preconditioned spectral clustering for stochastic block partition + streaming graph challenge (Preliminary version at arXiv.) 
+ David Zhuzhunashvili, Andrew Knyazev + <10.1109/HPEC.2017.8091045>` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics.pairwise import pairwise_kernels + >>> from sklearn.cluster import spectral_clustering + >>> X = np.array([[1, 1], [2, 1], [1, 0], + ... [4, 7], [3, 5], [3, 6]]) + >>> affinity = pairwise_kernels(X, metric='rbf') + >>> spectral_clustering( + ... affinity=affinity, n_clusters=2, assign_labels="discretize", random_state=0 + ... ) + array([1, 1, 1, 0, 0, 0]) + """ + + clusterer = SpectralClustering( + n_clusters=n_clusters, + n_components=n_components, + eigen_solver=eigen_solver, + random_state=random_state, + n_init=n_init, + affinity="precomputed", + eigen_tol=eigen_tol, + assign_labels=assign_labels, + verbose=verbose, + ).fit(affinity) + + return clusterer.labels_ + + +class SpectralClustering(ClusterMixin, BaseEstimator): + """Apply clustering to a projection of the normalized Laplacian. + + In practice Spectral Clustering is very useful when the structure of + the individual clusters is highly non-convex, or more generally when + a measure of the center and spread of the cluster is not a suitable + description of the complete cluster, such as when clusters are + nested circles on the 2D plane. + + If the affinity matrix is the adjacency matrix of a graph, this method + can be used to find normalized graph cuts [1]_, [2]_. + + When calling ``fit``, an affinity matrix is constructed using either + a kernel function such the Gaussian (aka RBF) kernel with Euclidean + distance ``d(X, X)``:: + + np.exp(-gamma * d(X,X) ** 2) + + or a k-nearest neighbors connectivity matrix. + + Alternatively, a user-provided affinity matrix can be specified by + setting ``affinity='precomputed'``. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_clusters : int, default=8 + The dimension of the projection subspace. + + eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None + The eigenvalue decomposition strategy to use. AMG requires pyamg + to be installed. It can be faster on very large, sparse problems, + but may also lead to instabilities. If None, then ``'arpack'`` is + used. See [4]_ for more details regarding `'lobpcg'`. + + n_components : int, default=None + Number of eigenvectors to use for the spectral embedding. If None, + defaults to `n_clusters`. + + random_state : int, RandomState instance, default=None + A pseudo random number generator used for the initialization + of the lobpcg eigenvectors decomposition when `eigen_solver == + 'amg'`, and for the K-Means initialization. Use an int to make + the results deterministic across calls (See + :term:`Glossary `). + + .. note:: + When using `eigen_solver == 'amg'`, + it is necessary to also fix the global numpy seed with + `np.random.seed(int)` to get deterministic results. See + https://github.com/pyamg/pyamg/issues/139 for further + information. + + n_init : int, default=10 + Number of time the k-means algorithm will be run with different + centroid seeds. The final results will be the best output of n_init + consecutive runs in terms of inertia. Only used if + ``assign_labels='kmeans'``. + + gamma : float, default=1.0 + Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels. + Ignored for ``affinity='nearest_neighbors'``. + + affinity : str or callable, default='rbf' + How to construct the affinity matrix. + - 'nearest_neighbors': construct the affinity matrix by computing a + graph of nearest neighbors. 
+ - 'rbf': construct the affinity matrix using a radial basis function + (RBF) kernel. + - 'precomputed': interpret ``X`` as a precomputed affinity matrix, + where larger values indicate greater similarity between instances. + - 'precomputed_nearest_neighbors': interpret ``X`` as a sparse graph + of precomputed distances, and construct a binary affinity matrix + from the ``n_neighbors`` nearest neighbors of each instance. + - one of the kernels supported by + :func:`~sklearn.metrics.pairwise.pairwise_kernels`. + + Only kernels that produce similarity scores (non-negative values that + increase with similarity) should be used. This property is not checked + by the clustering algorithm. + + n_neighbors : int, default=10 + Number of neighbors to use when constructing the affinity matrix using + the nearest neighbors method. Ignored for ``affinity='rbf'``. + + eigen_tol : float, default="auto" + Stopping criterion for eigen decomposition of the Laplacian matrix. + If `eigen_tol="auto"` then the passed tolerance will depend on the + `eigen_solver`: + + - If `eigen_solver="arpack"`, then `eigen_tol=0.0`; + - If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then + `eigen_tol=None` which configures the underlying `lobpcg` solver to + automatically resolve the value according to their heuristics. See, + :func:`scipy.sparse.linalg.lobpcg` for details. + + Note that when using `eigen_solver="lobpcg"` or `eigen_solver="amg"` + values of `tol<1e-5` may lead to convergence issues and should be + avoided. + + .. versionadded:: 1.2 + Added 'auto' option. + + assign_labels : {'kmeans', 'discretize', 'cluster_qr'}, default='kmeans' + The strategy for assigning labels in the embedding space. There are two + ways to assign labels after the Laplacian embedding. k-means is a + popular choice, but it can be sensitive to initialization. + Discretization is another approach which is less sensitive to random + initialization [3]_. + The cluster_qr method [5]_ directly extract clusters from eigenvectors + in spectral clustering. In contrast to k-means and discretization, cluster_qr + has no tuning parameters and runs no iterations, yet may outperform + k-means and discretization in terms of both quality and speed. + + .. versionchanged:: 1.1 + Added new labeling method 'cluster_qr'. + + degree : float, default=3 + Degree of the polynomial kernel. Ignored by other kernels. + + coef0 : float, default=1 + Zero coefficient for polynomial and sigmoid kernels. + Ignored by other kernels. + + kernel_params : dict of str to any, default=None + Parameters (keyword arguments) and values for kernel passed as + callable object. Ignored by other kernels. + + n_jobs : int, default=None + The number of parallel jobs to run when `affinity='nearest_neighbors'` + or `affinity='precomputed_nearest_neighbors'`. The neighbors search + will be done in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : bool, default=False + Verbosity mode. + + .. versionadded:: 0.24 + + Attributes + ---------- + affinity_matrix_ : array-like of shape (n_samples, n_samples) + Affinity matrix used for clustering. Available only after calling + ``fit``. + + labels_ : ndarray of shape (n_samples,) + Labels of each point + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. 
Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.cluster.KMeans : K-Means clustering. + sklearn.cluster.DBSCAN : Density-Based Spatial Clustering of + Applications with Noise. + + Notes + ----- + A distance matrix for which 0 indicates identical elements and high values + indicate very dissimilar elements can be transformed into an affinity / + similarity matrix that is well-suited for the algorithm by + applying the Gaussian (aka RBF, heat) kernel:: + + np.exp(- dist_matrix ** 2 / (2. * delta ** 2)) + + where ``delta`` is a free parameter representing the width of the Gaussian + kernel. + + An alternative is to take a symmetric version of the k-nearest neighbors + connectivity matrix of the points. + + If the pyamg package is installed, it is used: this greatly + speeds up computation. + + References + ---------- + .. [1] :doi:`Normalized cuts and image segmentation, 2000 + Jianbo Shi, Jitendra Malik + <10.1109/34.868688>` + + .. [2] :doi:`A Tutorial on Spectral Clustering, 2007 + Ulrike von Luxburg + <10.1007/s11222-007-9033-z>` + + .. [3] `Multiclass spectral clustering, 2003 + Stella X. Yu, Jianbo Shi + `_ + + .. [4] :doi:`Toward the Optimal Preconditioned Eigensolver: + Locally Optimal Block Preconditioned Conjugate Gradient Method, 2001 + A. V. Knyazev + SIAM Journal on Scientific Computing 23, no. 2, pp. 517-541. + <10.1137/S1064827500366124>` + + .. [5] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019 + Anil Damle, Victor Minden, Lexing Ying + <10.1093/imaiai/iay008>` + + Examples + -------- + >>> from sklearn.cluster import SpectralClustering + >>> import numpy as np + >>> X = np.array([[1, 1], [2, 1], [1, 0], + ... [4, 7], [3, 5], [3, 6]]) + >>> clustering = SpectralClustering(n_clusters=2, + ... assign_labels='discretize', + ... 
random_state=0).fit(X) + >>> clustering.labels_ + array([1, 1, 1, 0, 0, 0]) + >>> clustering + SpectralClustering(assign_labels='discretize', n_clusters=2, + random_state=0) + """ + + _parameter_constraints: dict = { + "n_clusters": [Interval(Integral, 1, None, closed="left")], + "eigen_solver": [StrOptions({"arpack", "lobpcg", "amg"}), None], + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "random_state": ["random_state"], + "n_init": [Interval(Integral, 1, None, closed="left")], + "gamma": [Interval(Real, 0, None, closed="left")], + "affinity": [ + callable, + StrOptions( + set(KERNEL_PARAMS) + | {"nearest_neighbors", "precomputed", "precomputed_nearest_neighbors"} + ), + ], + "n_neighbors": [Interval(Integral, 1, None, closed="left")], + "eigen_tol": [ + Interval(Real, 0.0, None, closed="left"), + StrOptions({"auto"}), + ], + "assign_labels": [StrOptions({"kmeans", "discretize", "cluster_qr"})], + "degree": [Interval(Real, 0, None, closed="left")], + "coef0": [Interval(Real, None, None, closed="neither")], + "kernel_params": [dict, None], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + } + + def __init__( + self, + n_clusters=8, + *, + eigen_solver=None, + n_components=None, + random_state=None, + n_init=10, + gamma=1.0, + affinity="rbf", + n_neighbors=10, + eigen_tol="auto", + assign_labels="kmeans", + degree=3, + coef0=1, + kernel_params=None, + n_jobs=None, + verbose=False, + ): + self.n_clusters = n_clusters + self.eigen_solver = eigen_solver + self.n_components = n_components + self.random_state = random_state + self.n_init = n_init + self.gamma = gamma + self.affinity = affinity + self.n_neighbors = n_neighbors + self.eigen_tol = eigen_tol + self.assign_labels = assign_labels + self.degree = degree + self.coef0 = coef0 + self.kernel_params = kernel_params + self.n_jobs = n_jobs + self.verbose = verbose + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Perform spectral clustering from features, or affinity matrix. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) + Training instances to cluster, similarities / affinities between + instances if ``affinity='precomputed'``, or distances between + instances if ``affinity='precomputed_nearest_neighbors``. If a + sparse matrix is provided in a format other than ``csr_matrix``, + ``csc_matrix``, or ``coo_matrix``, it will be converted into a + sparse ``csr_matrix``. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + A fitted instance of the estimator. + """ + X = self._validate_data( + X, + accept_sparse=["csr", "csc", "coo"], + dtype=np.float64, + ensure_min_samples=2, + ) + allow_squared = self.affinity in [ + "precomputed", + "precomputed_nearest_neighbors", + ] + if X.shape[0] == X.shape[1] and not allow_squared: + warnings.warn( + "The spectral clustering API has changed. ``fit``" + "now constructs an affinity matrix from data. To use" + " a custom affinity matrix, " + "set ``affinity=precomputed``." 
+ ) + + if self.affinity == "nearest_neighbors": + connectivity = kneighbors_graph( + X, n_neighbors=self.n_neighbors, include_self=True, n_jobs=self.n_jobs + ) + self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T) + elif self.affinity == "precomputed_nearest_neighbors": + estimator = NearestNeighbors( + n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed" + ).fit(X) + connectivity = estimator.kneighbors_graph(X=X, mode="connectivity") + self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T) + elif self.affinity == "precomputed": + self.affinity_matrix_ = X + else: + params = self.kernel_params + if params is None: + params = {} + if not callable(self.affinity): + params["gamma"] = self.gamma + params["degree"] = self.degree + params["coef0"] = self.coef0 + self.affinity_matrix_ = pairwise_kernels( + X, metric=self.affinity, filter_params=True, **params + ) + + random_state = check_random_state(self.random_state) + n_components = ( + self.n_clusters if self.n_components is None else self.n_components + ) + # We now obtain the real valued solution matrix to the + # relaxed Ncut problem, solving the eigenvalue problem + # L_sym x = lambda x and recovering u = D^-1/2 x. + # The first eigenvector is constant only for fully connected graphs + # and should be kept for spectral clustering (drop_first = False) + # See spectral_embedding documentation. + maps = spectral_embedding( + self.affinity_matrix_, + n_components=n_components, + eigen_solver=self.eigen_solver, + random_state=random_state, + eigen_tol=self.eigen_tol, + drop_first=False, + ) + if self.verbose: + print(f"Computing label assignment using {self.assign_labels}") + + if self.assign_labels == "kmeans": + _, self.labels_, _ = k_means( + maps, + self.n_clusters, + random_state=random_state, + n_init=self.n_init, + verbose=self.verbose, + ) + elif self.assign_labels == "cluster_qr": + self.labels_ = cluster_qr(maps) + else: + self.labels_ = discretize(maps, random_state=random_state) + + return self + + def fit_predict(self, X, y=None): + """Perform spectral clustering on `X` and return cluster labels. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) + Training instances to cluster, similarities / affinities between + instances if ``affinity='precomputed'``, or distances between + instances if ``affinity='precomputed_nearest_neighbors``. If a + sparse matrix is provided in a format other than ``csr_matrix``, + ``csc_matrix``, or ``coo_matrix``, it will be converted into a + sparse ``csr_matrix``. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + labels : ndarray of shape (n_samples,) + Cluster labels. 
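+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> from sklearn.cluster import SpectralClustering
+        >>> X = np.array([[1, 1], [2, 1], [1, 0],
+        ...               [4, 7], [3, 5], [3, 6]])
+        >>> SpectralClustering(n_clusters=2, assign_labels='discretize',
+        ...                    random_state=0).fit_predict(X)
+        array([1, 1, 1, 0, 0, 0])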
+ """ + return super().fit_predict(X, y) + + def _more_tags(self): + return { + "pairwise": self.affinity in [ + "precomputed", + "precomputed_nearest_neighbors", + ] + } diff --git a/venv/lib/python3.10/site-packages/sklearn/compose/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/compose/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a617a261f7b0fb923726c79af65621ac9f29337 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/compose/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/compose/__pycache__/_column_transformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/compose/__pycache__/_column_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11cc118ae1204ad0e1bd5caea5a1a37befbafb19 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/compose/__pycache__/_column_transformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/compose/__pycache__/_target.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/compose/__pycache__/_target.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb9e80658526c12db793bedd340ea12fd2162579 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/compose/__pycache__/_target.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/compose/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/compose/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e993b025844d850aebc5d928e1c4be15330626a Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_column_transformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_column_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6eb886c80e42445cef0bf8e1e0739d9b637e2099 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_column_transformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_target.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_target.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3471989044c1bb8deb32621bec1a677a394a9e9d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_target.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/compose/tests/test_column_transformer.py b/venv/lib/python3.10/site-packages/sklearn/compose/tests/test_column_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..e21c1a17010efcba076b4758eb024e0ef760b073 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/compose/tests/test_column_transformer.py @@ -0,0 +1,2582 @@ +""" +Test the ColumnTransformer. 
+""" + +import pickle +import re +import warnings + +import numpy as np +import pytest +from numpy.testing import assert_allclose +from scipy import sparse + +from sklearn.base import BaseEstimator, TransformerMixin +from sklearn.compose import ( + ColumnTransformer, + make_column_selector, + make_column_transformer, +) +from sklearn.exceptions import NotFittedError +from sklearn.feature_selection import VarianceThreshold +from sklearn.preprocessing import ( + FunctionTransformer, + Normalizer, + OneHotEncoder, + StandardScaler, +) +from sklearn.tests.metadata_routing_common import ( + ConsumingTransformer, + _Registry, + check_recorded_metadata, +) +from sklearn.utils._testing import ( + _convert_container, + assert_allclose_dense_sparse, + assert_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +class Trans(TransformerMixin, BaseEstimator): + def fit(self, X, y=None): + return self + + def transform(self, X, y=None): + # 1D Series -> 2D DataFrame + if hasattr(X, "to_frame"): + return X.to_frame() + # 1D array -> 2D array + if getattr(X, "ndim", 2) == 1: + return np.atleast_2d(X).T + return X + + +class DoubleTrans(BaseEstimator): + def fit(self, X, y=None): + return self + + def transform(self, X): + return 2 * X + + +class SparseMatrixTrans(BaseEstimator): + def __init__(self, csr_container): + self.csr_container = csr_container + + def fit(self, X, y=None): + return self + + def transform(self, X, y=None): + n_samples = len(X) + return self.csr_container(sparse.eye(n_samples, n_samples)) + + +class TransNo2D(BaseEstimator): + def fit(self, X, y=None): + return self + + def transform(self, X, y=None): + return X + + +class TransRaise(BaseEstimator): + def fit(self, X, y=None): + raise ValueError("specific message") + + def transform(self, X, y=None): + raise ValueError("specific message") + + +def test_column_transformer(): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + + X_res_first1D = np.array([0, 1, 2]) + X_res_second1D = np.array([2, 4, 6]) + X_res_first = X_res_first1D.reshape(-1, 1) + X_res_both = X_array + + cases = [ + # single column 1D / 2D + (0, X_res_first), + ([0], X_res_first), + # list-like + ([0, 1], X_res_both), + (np.array([0, 1]), X_res_both), + # slice + (slice(0, 1), X_res_first), + (slice(0, 2), X_res_both), + # boolean mask + (np.array([True, False]), X_res_first), + ([True, False], X_res_first), + (np.array([True, True]), X_res_both), + ([True, True], X_res_both), + ] + + for selection, res in cases: + ct = ColumnTransformer([("trans", Trans(), selection)], remainder="drop") + assert_array_equal(ct.fit_transform(X_array), res) + assert_array_equal(ct.fit(X_array).transform(X_array), res) + + # callable that returns any of the allowed specifiers + ct = ColumnTransformer( + [("trans", Trans(), lambda x: selection)], remainder="drop" + ) + assert_array_equal(ct.fit_transform(X_array), res) + assert_array_equal(ct.fit(X_array).transform(X_array), res) + + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])]) + assert_array_equal(ct.fit_transform(X_array), X_res_both) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) + assert len(ct.transformers_) == 2 + + # test with transformer_weights + transformer_weights = {"trans1": 0.1, "trans2": 10} + both = ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", Trans(), [1])], + transformer_weights=transformer_weights, + ) + res = np.vstack( + [ + transformer_weights["trans1"] * X_res_first1D, + transformer_weights["trans2"] * 
X_res_second1D, + ] + ).T + assert_array_equal(both.fit_transform(X_array), res) + assert_array_equal(both.fit(X_array).transform(X_array), res) + assert len(both.transformers_) == 2 + + both = ColumnTransformer( + [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1} + ) + assert_array_equal(both.fit_transform(X_array), 0.1 * X_res_both) + assert_array_equal(both.fit(X_array).transform(X_array), 0.1 * X_res_both) + assert len(both.transformers_) == 1 + + +def test_column_transformer_tuple_transformers_parameter(): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + + transformers = [("trans1", Trans(), [0]), ("trans2", Trans(), [1])] + + ct_with_list = ColumnTransformer(transformers) + ct_with_tuple = ColumnTransformer(tuple(transformers)) + + assert_array_equal( + ct_with_list.fit_transform(X_array), ct_with_tuple.fit_transform(X_array) + ) + assert_array_equal( + ct_with_list.fit(X_array).transform(X_array), + ct_with_tuple.fit(X_array).transform(X_array), + ) + + +@pytest.mark.parametrize("constructor_name", ["dataframe", "polars"]) +def test_column_transformer_dataframe(constructor_name): + if constructor_name == "dataframe": + dataframe_lib = pytest.importorskip("pandas") + else: + dataframe_lib = pytest.importorskip(constructor_name) + + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_df = _convert_container( + X_array, constructor_name, columns_name=["first", "second"] + ) + + X_res_first = np.array([0, 1, 2]).reshape(-1, 1) + X_res_both = X_array + + cases = [ + # String keys: label based + # list + (["first"], X_res_first), + (["first", "second"], X_res_both), + # slice + (slice("first", "second"), X_res_both), + # int keys: positional + # list + ([0], X_res_first), + ([0, 1], X_res_both), + (np.array([0, 1]), X_res_both), + # slice + (slice(0, 1), X_res_first), + (slice(0, 2), X_res_both), + # boolean mask + (np.array([True, False]), X_res_first), + ([True, False], X_res_first), + ] + if constructor_name == "dataframe": + # Scalars are only supported for pandas dataframes. 
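+        # A scalar specifier (position 0 or label "first") selects a single
+        # column, so the transformer receives a 1d Series which ``Trans``
+        # converts back to a single-column result.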
+ cases.extend( + [ + # scalar + (0, X_res_first), + ("first", X_res_first), + ( + dataframe_lib.Series([True, False], index=["first", "second"]), + X_res_first, + ), + ] + ) + + for selection, res in cases: + ct = ColumnTransformer([("trans", Trans(), selection)], remainder="drop") + assert_array_equal(ct.fit_transform(X_df), res) + assert_array_equal(ct.fit(X_df).transform(X_df), res) + + # callable that returns any of the allowed specifiers + ct = ColumnTransformer( + [("trans", Trans(), lambda X: selection)], remainder="drop" + ) + assert_array_equal(ct.fit_transform(X_df), res) + assert_array_equal(ct.fit(X_df).transform(X_df), res) + + ct = ColumnTransformer( + [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])] + ) + assert_array_equal(ct.fit_transform(X_df), X_res_both) + assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] != "remainder" + + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])]) + assert_array_equal(ct.fit_transform(X_df), X_res_both) + assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] != "remainder" + + # test with transformer_weights + transformer_weights = {"trans1": 0.1, "trans2": 10} + both = ColumnTransformer( + [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])], + transformer_weights=transformer_weights, + ) + res = np.vstack( + [ + transformer_weights["trans1"] * X_df["first"], + transformer_weights["trans2"] * X_df["second"], + ] + ).T + assert_array_equal(both.fit_transform(X_df), res) + assert_array_equal(both.fit(X_df).transform(X_df), res) + assert len(both.transformers_) == 2 + assert both.transformers_[-1][0] != "remainder" + + # test multiple columns + both = ColumnTransformer( + [("trans", Trans(), ["first", "second"])], transformer_weights={"trans": 0.1} + ) + assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both) + assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both) + assert len(both.transformers_) == 1 + assert both.transformers_[-1][0] != "remainder" + + both = ColumnTransformer( + [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1} + ) + assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both) + assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both) + assert len(both.transformers_) == 1 + assert both.transformers_[-1][0] != "remainder" + + # ensure pandas object is passed through + + class TransAssert(BaseEstimator): + def __init__(self, expected_type_transform): + self.expected_type_transform = expected_type_transform + + def fit(self, X, y=None): + return self + + def transform(self, X, y=None): + assert isinstance(X, self.expected_type_transform) + if isinstance(X, dataframe_lib.Series): + X = X.to_frame() + return X + + ct = ColumnTransformer( + [ + ( + "trans", + TransAssert(expected_type_transform=dataframe_lib.DataFrame), + ["first", "second"], + ) + ] + ) + ct.fit_transform(X_df) + + if constructor_name == "dataframe": + # DataFrame protocol does not have 1d columns, so we only test on Pandas + # dataframes. 
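+        # With a scalar column specifier the selected column is handed to the
+        # transformer as a 1d pandas Series rather than a one-column
+        # DataFrame, which is what the assertion below verifies.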
+ ct = ColumnTransformer( + [ + ( + "trans", + TransAssert(expected_type_transform=dataframe_lib.Series), + "first", + ) + ], + remainder="drop", + ) + ct.fit_transform(X_df) + + # Only test on pandas because the dataframe protocol requires string column + # names + # integer column spec + integer column names -> still use positional + X_df2 = X_df.copy() + X_df2.columns = [1, 0] + ct = ColumnTransformer([("trans", Trans(), 0)], remainder="drop") + assert_array_equal(ct.fit_transform(X_df2), X_res_first) + assert_array_equal(ct.fit(X_df2).transform(X_df2), X_res_first) + + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert ct.transformers_[-1][1] == "drop" + assert_array_equal(ct.transformers_[-1][2], [1]) + + +@pytest.mark.parametrize("pandas", [True, False], ids=["pandas", "numpy"]) +@pytest.mark.parametrize( + "column_selection", + [[], np.array([False, False]), [False, False]], + ids=["list", "bool", "bool_int"], +) +@pytest.mark.parametrize("callable_column", [False, True]) +def test_column_transformer_empty_columns(pandas, column_selection, callable_column): + # test case that ensures that the column transformer does also work when + # a given transformer doesn't have any columns to work on + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_res_both = X_array + + if pandas: + pd = pytest.importorskip("pandas") + X = pd.DataFrame(X_array, columns=["first", "second"]) + else: + X = X_array + + if callable_column: + column = lambda X: column_selection # noqa + else: + column = column_selection + + ct = ColumnTransformer( + [("trans1", Trans(), [0, 1]), ("trans2", TransRaise(), column)] + ) + assert_array_equal(ct.fit_transform(X), X_res_both) + assert_array_equal(ct.fit(X).transform(X), X_res_both) + assert len(ct.transformers_) == 2 + assert isinstance(ct.transformers_[1][1], TransRaise) + + ct = ColumnTransformer( + [("trans1", TransRaise(), column), ("trans2", Trans(), [0, 1])] + ) + assert_array_equal(ct.fit_transform(X), X_res_both) + assert_array_equal(ct.fit(X).transform(X), X_res_both) + assert len(ct.transformers_) == 2 + assert isinstance(ct.transformers_[0][1], TransRaise) + + ct = ColumnTransformer([("trans", TransRaise(), column)], remainder="passthrough") + assert_array_equal(ct.fit_transform(X), X_res_both) + assert_array_equal(ct.fit(X).transform(X), X_res_both) + assert len(ct.transformers_) == 2 # including remainder + assert isinstance(ct.transformers_[0][1], TransRaise) + + fixture = np.array([[], [], []]) + ct = ColumnTransformer([("trans", TransRaise(), column)], remainder="drop") + assert_array_equal(ct.fit_transform(X), fixture) + assert_array_equal(ct.fit(X).transform(X), fixture) + assert len(ct.transformers_) == 2 # including remainder + assert isinstance(ct.transformers_[0][1], TransRaise) + + +def test_column_transformer_output_indices(): + # Checks for the output_indices_ attribute + X_array = np.arange(6).reshape(3, 2) + + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])]) + X_trans = ct.fit_transform(X_array) + assert ct.output_indices_ == { + "trans1": slice(0, 1), + "trans2": slice(1, 2), + "remainder": slice(0, 0), + } + assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]]) + assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]]) + + # test with transformer_weights and multiple columns + ct = ColumnTransformer( + [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1} + ) + X_trans = ct.fit_transform(X_array) + assert 
ct.output_indices_ == {"trans": slice(0, 2), "remainder": slice(0, 0)} + assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["trans"]]) + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]]) + + # test case that ensures that the attribute does also work when + # a given transformer doesn't have any columns to work on + ct = ColumnTransformer([("trans1", Trans(), [0, 1]), ("trans2", TransRaise(), [])]) + X_trans = ct.fit_transform(X_array) + assert ct.output_indices_ == { + "trans1": slice(0, 2), + "trans2": slice(0, 0), + "remainder": slice(0, 0), + } + assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["trans1"]]) + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["trans2"]]) + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]]) + + ct = ColumnTransformer([("trans", TransRaise(), [])], remainder="passthrough") + X_trans = ct.fit_transform(X_array) + assert ct.output_indices_ == {"trans": slice(0, 0), "remainder": slice(0, 2)} + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["trans"]]) + assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["remainder"]]) + + +def test_column_transformer_output_indices_df(): + # Checks for the output_indices_ attribute with data frames + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame(np.arange(6).reshape(3, 2), columns=["first", "second"]) + + ct = ColumnTransformer( + [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])] + ) + X_trans = ct.fit_transform(X_df) + assert ct.output_indices_ == { + "trans1": slice(0, 1), + "trans2": slice(1, 2), + "remainder": slice(0, 0), + } + assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]]) + assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]]) + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]]) + + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])]) + X_trans = ct.fit_transform(X_df) + assert ct.output_indices_ == { + "trans1": slice(0, 1), + "trans2": slice(1, 2), + "remainder": slice(0, 0), + } + assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]]) + assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]]) + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_column_transformer_sparse_array(csr_container): + X_sparse = csr_container(sparse.eye(3, 2)) + + # no distinction between 1D and 2D + X_res_first = X_sparse[:, [0]] + X_res_both = X_sparse + + for col in [(0,), [0], slice(0, 1)]: + for remainder, res in [("drop", X_res_first), ("passthrough", X_res_both)]: + ct = ColumnTransformer( + [("trans", Trans(), col)], remainder=remainder, sparse_threshold=0.8 + ) + assert sparse.issparse(ct.fit_transform(X_sparse)) + assert_allclose_dense_sparse(ct.fit_transform(X_sparse), res) + assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), res) + + for col in [[0, 1], slice(0, 2)]: + ct = ColumnTransformer([("trans", Trans(), col)], sparse_threshold=0.8) + assert sparse.issparse(ct.fit_transform(X_sparse)) + assert_allclose_dense_sparse(ct.fit_transform(X_sparse), X_res_both) + assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), X_res_both) + + +def test_column_transformer_list(): + X_list = [[1, float("nan"), "a"], [0, 0, "b"]] + expected_result = np.array( + [ + [1, float("nan"), 1, 0], + 
[-1, 0, 0, 1], + ] + ) + + ct = ColumnTransformer( + [ + ("numerical", StandardScaler(), [0, 1]), + ("categorical", OneHotEncoder(), [2]), + ] + ) + + assert_array_equal(ct.fit_transform(X_list), expected_result) + assert_array_equal(ct.fit(X_list).transform(X_list), expected_result) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_column_transformer_sparse_stacking(csr_container): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + col_trans = ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(csr_container), 1)], + sparse_threshold=0.8, + ) + col_trans.fit(X_array) + X_trans = col_trans.transform(X_array) + assert sparse.issparse(X_trans) + assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1) + assert_array_equal(X_trans.toarray()[:, 1:], np.eye(X_trans.shape[0])) + assert len(col_trans.transformers_) == 2 + assert col_trans.transformers_[-1][0] != "remainder" + + col_trans = ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(csr_container), 1)], + sparse_threshold=0.1, + ) + col_trans.fit(X_array) + X_trans = col_trans.transform(X_array) + assert not sparse.issparse(X_trans) + assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1) + assert_array_equal(X_trans[:, 1:], np.eye(X_trans.shape[0])) + + +def test_column_transformer_mixed_cols_sparse(): + df = np.array([["a", 1, True], ["b", 2, False]], dtype="O") + + ct = make_column_transformer( + (OneHotEncoder(), [0]), ("passthrough", [1, 2]), sparse_threshold=1.0 + ) + + # this shouldn't fail, since boolean can be coerced into a numeric + # See: https://github.com/scikit-learn/scikit-learn/issues/11912 + X_trans = ct.fit_transform(df) + assert X_trans.getformat() == "csr" + assert_array_equal(X_trans.toarray(), np.array([[1, 0, 1, 1], [0, 1, 2, 0]])) + + ct = make_column_transformer( + (OneHotEncoder(), [0]), ("passthrough", [0]), sparse_threshold=1.0 + ) + with pytest.raises(ValueError, match="For a sparse output, all columns should"): + # this fails since strings `a` and `b` cannot be + # coerced into a numeric. 
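+        # with sparse_threshold=1.0 a sparse result is requested, but stacking
+        # the one-hot encoded block with a passed-through string column cannot
+        # yield a numeric sparse matrix.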
+ ct.fit_transform(df) + + +def test_column_transformer_sparse_threshold(): + X_array = np.array([["a", "b"], ["A", "B"]], dtype=object).T + # above data has sparsity of 4 / 8 = 0.5 + + # apply threshold even if all sparse + col_trans = ColumnTransformer( + [("trans1", OneHotEncoder(), [0]), ("trans2", OneHotEncoder(), [1])], + sparse_threshold=0.2, + ) + res = col_trans.fit_transform(X_array) + assert not sparse.issparse(res) + assert not col_trans.sparse_output_ + + # mixed -> sparsity of (4 + 2) / 8 = 0.75 + for thres in [0.75001, 1]: + col_trans = ColumnTransformer( + [ + ("trans1", OneHotEncoder(sparse_output=True), [0]), + ("trans2", OneHotEncoder(sparse_output=False), [1]), + ], + sparse_threshold=thres, + ) + res = col_trans.fit_transform(X_array) + assert sparse.issparse(res) + assert col_trans.sparse_output_ + + for thres in [0.75, 0]: + col_trans = ColumnTransformer( + [ + ("trans1", OneHotEncoder(sparse_output=True), [0]), + ("trans2", OneHotEncoder(sparse_output=False), [1]), + ], + sparse_threshold=thres, + ) + res = col_trans.fit_transform(X_array) + assert not sparse.issparse(res) + assert not col_trans.sparse_output_ + + # if nothing is sparse -> no sparse + for thres in [0.33, 0, 1]: + col_trans = ColumnTransformer( + [ + ("trans1", OneHotEncoder(sparse_output=False), [0]), + ("trans2", OneHotEncoder(sparse_output=False), [1]), + ], + sparse_threshold=thres, + ) + res = col_trans.fit_transform(X_array) + assert not sparse.issparse(res) + assert not col_trans.sparse_output_ + + +def test_column_transformer_error_msg_1D(): + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + + col_trans = ColumnTransformer([("trans", StandardScaler(), 0)]) + msg = "1D data passed to a transformer" + with pytest.raises(ValueError, match=msg): + col_trans.fit(X_array) + + with pytest.raises(ValueError, match=msg): + col_trans.fit_transform(X_array) + + col_trans = ColumnTransformer([("trans", TransRaise(), 0)]) + for func in [col_trans.fit, col_trans.fit_transform]: + with pytest.raises(ValueError, match="specific message"): + func(X_array) + + +def test_2D_transformer_output(): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + + # if one transformer is dropped, test that name is still correct + ct = ColumnTransformer([("trans1", "drop", 0), ("trans2", TransNo2D(), 1)]) + + msg = "the 'trans2' transformer should be 2D" + with pytest.raises(ValueError, match=msg): + ct.fit_transform(X_array) + # because fit is also doing transform, this raises already on fit + with pytest.raises(ValueError, match=msg): + ct.fit(X_array) + + +def test_2D_transformer_output_pandas(): + pd = pytest.importorskip("pandas") + + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_df = pd.DataFrame(X_array, columns=["col1", "col2"]) + + # if one transformer is dropped, test that name is still correct + ct = ColumnTransformer([("trans1", TransNo2D(), "col1")]) + msg = "the 'trans1' transformer should be 2D" + with pytest.raises(ValueError, match=msg): + ct.fit_transform(X_df) + # because fit is also doing transform, this raises already on fit + with pytest.raises(ValueError, match=msg): + ct.fit(X_df) + + +@pytest.mark.parametrize("remainder", ["drop", "passthrough"]) +def test_column_transformer_invalid_columns(remainder): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + + # general invalid + for col in [1.5, ["string", 1], slice(1, "s"), np.array([1.0])]: + ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder) + with pytest.raises(ValueError, match="No valid specification"): + ct.fit(X_array) + + 
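+    # the specifiers above are rejected regardless of the input type, whereas
+    # the string/label specifiers tested next fail only because X is a plain
+    # numpy array without column names.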
# invalid for arrays + for col in ["string", ["string", "other"], slice("a", "b")]: + ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder) + with pytest.raises(ValueError, match="Specifying the columns"): + ct.fit(X_array) + + # transformed n_features does not match fitted n_features + col = [0, 1] + ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder) + ct.fit(X_array) + X_array_more = np.array([[0, 1, 2], [2, 4, 6], [3, 6, 9]]).T + msg = "X has 3 features, but ColumnTransformer is expecting 2 features as input." + with pytest.raises(ValueError, match=msg): + ct.transform(X_array_more) + X_array_fewer = np.array( + [ + [0, 1, 2], + ] + ).T + err_msg = ( + "X has 1 features, but ColumnTransformer is expecting 2 features as input." + ) + with pytest.raises(ValueError, match=err_msg): + ct.transform(X_array_fewer) + + +def test_column_transformer_invalid_transformer(): + class NoTrans(BaseEstimator): + def fit(self, X, y=None): + return self + + def predict(self, X): + return X + + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + ct = ColumnTransformer([("trans", NoTrans(), [0])]) + msg = "All estimators should implement fit and transform" + with pytest.raises(TypeError, match=msg): + ct.fit(X_array) + + +def test_make_column_transformer(): + scaler = StandardScaler() + norm = Normalizer() + ct = make_column_transformer((scaler, "first"), (norm, ["second"])) + names, transformers, columns = zip(*ct.transformers) + assert names == ("standardscaler", "normalizer") + assert transformers == (scaler, norm) + assert columns == ("first", ["second"]) + + +def test_make_column_transformer_pandas(): + pd = pytest.importorskip("pandas") + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_df = pd.DataFrame(X_array, columns=["first", "second"]) + norm = Normalizer() + ct1 = ColumnTransformer([("norm", Normalizer(), X_df.columns)]) + ct2 = make_column_transformer((norm, X_df.columns)) + assert_almost_equal(ct1.fit_transform(X_df), ct2.fit_transform(X_df)) + + +def test_make_column_transformer_kwargs(): + scaler = StandardScaler() + norm = Normalizer() + ct = make_column_transformer( + (scaler, "first"), + (norm, ["second"]), + n_jobs=3, + remainder="drop", + sparse_threshold=0.5, + ) + assert ( + ct.transformers + == make_column_transformer((scaler, "first"), (norm, ["second"])).transformers + ) + assert ct.n_jobs == 3 + assert ct.remainder == "drop" + assert ct.sparse_threshold == 0.5 + # invalid keyword parameters should raise an error message + msg = re.escape( + "make_column_transformer() got an unexpected " + "keyword argument 'transformer_weights'" + ) + with pytest.raises(TypeError, match=msg): + make_column_transformer( + (scaler, "first"), + (norm, ["second"]), + transformer_weights={"pca": 10, "Transf": 1}, + ) + + +def test_make_column_transformer_remainder_transformer(): + scaler = StandardScaler() + norm = Normalizer() + remainder = StandardScaler() + ct = make_column_transformer( + (scaler, "first"), (norm, ["second"]), remainder=remainder + ) + assert ct.remainder == remainder + + +def test_column_transformer_get_set_params(): + ct = ColumnTransformer( + [("trans1", StandardScaler(), [0]), ("trans2", StandardScaler(), [1])] + ) + + exp = { + "n_jobs": None, + "remainder": "drop", + "sparse_threshold": 0.3, + "trans1": ct.transformers[0][1], + "trans1__copy": True, + "trans1__with_mean": True, + "trans1__with_std": True, + "trans2": ct.transformers[1][1], + "trans2__copy": True, + "trans2__with_mean": True, + "trans2__with_std": True, + "transformers": 
ct.transformers, + "transformer_weights": None, + "verbose_feature_names_out": True, + "verbose": False, + } + + assert ct.get_params() == exp + + ct.set_params(trans1__with_mean=False) + assert not ct.get_params()["trans1__with_mean"] + + ct.set_params(trans1="passthrough") + exp = { + "n_jobs": None, + "remainder": "drop", + "sparse_threshold": 0.3, + "trans1": "passthrough", + "trans2": ct.transformers[1][1], + "trans2__copy": True, + "trans2__with_mean": True, + "trans2__with_std": True, + "transformers": ct.transformers, + "transformer_weights": None, + "verbose_feature_names_out": True, + "verbose": False, + } + + assert ct.get_params() == exp + + +def test_column_transformer_named_estimators(): + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + ct = ColumnTransformer( + [ + ("trans1", StandardScaler(), [0]), + ("trans2", StandardScaler(with_std=False), [1]), + ] + ) + assert not hasattr(ct, "transformers_") + ct.fit(X_array) + assert hasattr(ct, "transformers_") + assert isinstance(ct.named_transformers_["trans1"], StandardScaler) + assert isinstance(ct.named_transformers_.trans1, StandardScaler) + assert isinstance(ct.named_transformers_["trans2"], StandardScaler) + assert isinstance(ct.named_transformers_.trans2, StandardScaler) + assert not ct.named_transformers_.trans2.with_std + # check it are fitted transformers + assert ct.named_transformers_.trans1.mean_ == 1.0 + + +def test_column_transformer_cloning(): + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + + ct = ColumnTransformer([("trans", StandardScaler(), [0])]) + ct.fit(X_array) + assert not hasattr(ct.transformers[0][1], "mean_") + assert hasattr(ct.transformers_[0][1], "mean_") + + ct = ColumnTransformer([("trans", StandardScaler(), [0])]) + ct.fit_transform(X_array) + assert not hasattr(ct.transformers[0][1], "mean_") + assert hasattr(ct.transformers_[0][1], "mean_") + + +def test_column_transformer_get_feature_names(): + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + ct = ColumnTransformer([("trans", Trans(), [0, 1])]) + # raise correct error when not fitted + with pytest.raises(NotFittedError): + ct.get_feature_names_out() + # raise correct error when no feature names are available + ct.fit(X_array) + msg = re.escape( + "Transformer trans (type Trans) does not provide get_feature_names_out" + ) + with pytest.raises(AttributeError, match=msg): + ct.get_feature_names_out() + + +def test_column_transformer_special_strings(): + # one 'drop' -> ignore + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", "drop", [1])]) + exp = np.array([[0.0], [1.0], [2.0]]) + assert_array_equal(ct.fit_transform(X_array), exp) + assert_array_equal(ct.fit(X_array).transform(X_array), exp) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] != "remainder" + + # all 'drop' -> return shape 0 array + ct = ColumnTransformer([("trans1", "drop", [0]), ("trans2", "drop", [1])]) + assert_array_equal(ct.fit(X_array).transform(X_array).shape, (3, 0)) + assert_array_equal(ct.fit_transform(X_array).shape, (3, 0)) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] != "remainder" + + # 'passthrough' + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", "passthrough", [1])]) + exp = X_array + assert_array_equal(ct.fit_transform(X_array), exp) + assert_array_equal(ct.fit(X_array).transform(X_array), exp) + assert len(ct.transformers_) == 2 + assert 
ct.transformers_[-1][0] != "remainder" + + +def test_column_transformer_remainder(): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + + X_res_first = np.array([0, 1, 2]).reshape(-1, 1) + X_res_second = np.array([2, 4, 6]).reshape(-1, 1) + X_res_both = X_array + + # default drop + ct = ColumnTransformer([("trans1", Trans(), [0])]) + assert_array_equal(ct.fit_transform(X_array), X_res_first) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert ct.transformers_[-1][1] == "drop" + assert_array_equal(ct.transformers_[-1][2], [1]) + + # specify passthrough + ct = ColumnTransformer([("trans", Trans(), [0])], remainder="passthrough") + assert_array_equal(ct.fit_transform(X_array), X_res_both) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], FunctionTransformer) + assert_array_equal(ct.transformers_[-1][2], [1]) + + # column order is not preserved (passed through added to end) + ct = ColumnTransformer([("trans1", Trans(), [1])], remainder="passthrough") + assert_array_equal(ct.fit_transform(X_array), X_res_both[:, ::-1]) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both[:, ::-1]) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], FunctionTransformer) + assert_array_equal(ct.transformers_[-1][2], [0]) + + # passthrough when all actual transformers are skipped + ct = ColumnTransformer([("trans1", "drop", [0])], remainder="passthrough") + assert_array_equal(ct.fit_transform(X_array), X_res_second) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_second) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], FunctionTransformer) + assert_array_equal(ct.transformers_[-1][2], [1]) + + # check default for make_column_transformer + ct = make_column_transformer((Trans(), [0])) + assert ct.remainder == "drop" + + +@pytest.mark.parametrize( + "key", [[0], np.array([0]), slice(0, 1), np.array([True, False])] +) +def test_column_transformer_remainder_numpy(key): + # test different ways that columns are specified with passthrough + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_res_both = X_array + + ct = ColumnTransformer([("trans1", Trans(), key)], remainder="passthrough") + assert_array_equal(ct.fit_transform(X_array), X_res_both) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], FunctionTransformer) + assert_array_equal(ct.transformers_[-1][2], [1]) + + +@pytest.mark.parametrize( + "key", + [ + [0], + slice(0, 1), + np.array([True, False]), + ["first"], + "pd-index", + np.array(["first"]), + np.array(["first"], dtype=object), + slice(None, "first"), + slice("first", "first"), + ], +) +def test_column_transformer_remainder_pandas(key): + # test different ways that columns are specified with passthrough + pd = pytest.importorskip("pandas") + if isinstance(key, str) and key == "pd-index": + key = pd.Index(["first"]) + + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_df = pd.DataFrame(X_array, columns=["first", "second"]) + X_res_both = X_array + + ct = ColumnTransformer([("trans1", Trans(), key)], remainder="passthrough") + 
assert_array_equal(ct.fit_transform(X_df), X_res_both) + assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], FunctionTransformer) + assert_array_equal(ct.transformers_[-1][2], [1]) + + +@pytest.mark.parametrize( + "key", [[0], np.array([0]), slice(0, 1), np.array([True, False, False])] +) +def test_column_transformer_remainder_transformer(key): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + X_res_both = X_array.copy() + + # second and third columns are doubled when remainder = DoubleTrans + X_res_both[:, 1:3] *= 2 + + ct = ColumnTransformer([("trans1", Trans(), key)], remainder=DoubleTrans()) + + assert_array_equal(ct.fit_transform(X_array), X_res_both) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], DoubleTrans) + assert_array_equal(ct.transformers_[-1][2], [1, 2]) + + +def test_column_transformer_no_remaining_remainder_transformer(): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + + ct = ColumnTransformer([("trans1", Trans(), [0, 1, 2])], remainder=DoubleTrans()) + + assert_array_equal(ct.fit_transform(X_array), X_array) + assert_array_equal(ct.fit(X_array).transform(X_array), X_array) + assert len(ct.transformers_) == 1 + assert ct.transformers_[-1][0] != "remainder" + + +def test_column_transformer_drops_all_remainder_transformer(): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + + # columns are doubled when remainder = DoubleTrans + X_res_both = 2 * X_array.copy()[:, 1:3] + + ct = ColumnTransformer([("trans1", "drop", [0])], remainder=DoubleTrans()) + + assert_array_equal(ct.fit_transform(X_array), X_res_both) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], DoubleTrans) + assert_array_equal(ct.transformers_[-1][2], [1, 2]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_column_transformer_sparse_remainder_transformer(csr_container): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + + ct = ColumnTransformer( + [("trans1", Trans(), [0])], + remainder=SparseMatrixTrans(csr_container), + sparse_threshold=0.8, + ) + + X_trans = ct.fit_transform(X_array) + assert sparse.issparse(X_trans) + # SparseMatrixTrans creates 3 features for each column. 
There is + # one column in ``transformers``, thus: + assert X_trans.shape == (3, 3 + 1) + + exp_array = np.hstack((X_array[:, 0].reshape(-1, 1), np.eye(3))) + assert_array_equal(X_trans.toarray(), exp_array) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans) + assert_array_equal(ct.transformers_[-1][2], [1, 2]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_column_transformer_drop_all_sparse_remainder_transformer(csr_container): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + ct = ColumnTransformer( + [("trans1", "drop", [0])], + remainder=SparseMatrixTrans(csr_container), + sparse_threshold=0.8, + ) + + X_trans = ct.fit_transform(X_array) + assert sparse.issparse(X_trans) + + # SparseMatrixTrans creates 3 features for each column, thus: + assert X_trans.shape == (3, 3) + assert_array_equal(X_trans.toarray(), np.eye(3)) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans) + assert_array_equal(ct.transformers_[-1][2], [1, 2]) + + +def test_column_transformer_get_set_params_with_remainder(): + ct = ColumnTransformer( + [("trans1", StandardScaler(), [0])], remainder=StandardScaler() + ) + + exp = { + "n_jobs": None, + "remainder": ct.remainder, + "remainder__copy": True, + "remainder__with_mean": True, + "remainder__with_std": True, + "sparse_threshold": 0.3, + "trans1": ct.transformers[0][1], + "trans1__copy": True, + "trans1__with_mean": True, + "trans1__with_std": True, + "transformers": ct.transformers, + "transformer_weights": None, + "verbose_feature_names_out": True, + "verbose": False, + } + + assert ct.get_params() == exp + + ct.set_params(remainder__with_std=False) + assert not ct.get_params()["remainder__with_std"] + + ct.set_params(trans1="passthrough") + exp = { + "n_jobs": None, + "remainder": ct.remainder, + "remainder__copy": True, + "remainder__with_mean": True, + "remainder__with_std": False, + "sparse_threshold": 0.3, + "trans1": "passthrough", + "transformers": ct.transformers, + "transformer_weights": None, + "verbose_feature_names_out": True, + "verbose": False, + } + assert ct.get_params() == exp + + +def test_column_transformer_no_estimators(): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).astype("float").T + ct = ColumnTransformer([], remainder=StandardScaler()) + + params = ct.get_params() + assert params["remainder__with_mean"] + + X_trans = ct.fit_transform(X_array) + assert X_trans.shape == X_array.shape + assert len(ct.transformers_) == 1 + assert ct.transformers_[-1][0] == "remainder" + assert ct.transformers_[-1][2] == [0, 1, 2] + + +@pytest.mark.parametrize( + ["est", "pattern"], + [ + ( + ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", Trans(), [1])], + remainder=DoubleTrans(), + ), + ( + r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n" + r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$" + ), + ), + ( + ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", Trans(), [1])], + remainder="passthrough", + ), + ( + r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n" + r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$" + ), + ), + ( + ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", "drop", 
[1])], + remainder="passthrough", + ), + ( + r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$" + ), + ), + ( + ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", "passthrough", [1])], + remainder="passthrough", + ), + ( + r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n" + r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$" + ), + ), + ( + ColumnTransformer([("trans1", Trans(), [0])], remainder="passthrough"), + ( + r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$" + ), + ), + ( + ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", Trans(), [1])], remainder="drop" + ), + ( + r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 2\) Processing trans2.* total=.*\n$" + ), + ), + ( + ColumnTransformer([("trans1", Trans(), [0])], remainder="drop"), + r"\[ColumnTransformer\].*\(1 of 1\) Processing trans1.* total=.*\n$", + ), + ], +) +@pytest.mark.parametrize("method", ["fit", "fit_transform"]) +def test_column_transformer_verbose(est, pattern, method, capsys): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + + func = getattr(est, method) + est.set_params(verbose=False) + func(X_array) + assert not capsys.readouterr().out, "Got output for verbose=False" + + est.set_params(verbose=True) + func(X_array) + assert re.match(pattern, capsys.readouterr()[0]) + + +def test_column_transformer_no_estimators_set_params(): + ct = ColumnTransformer([]).set_params(n_jobs=2) + assert ct.n_jobs == 2 + + +def test_column_transformer_callable_specifier(): + # assert that function gets the full array + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_res_first = np.array([[0, 1, 2]]).T + + def func(X): + assert_array_equal(X, X_array) + return [0] + + ct = ColumnTransformer([("trans", Trans(), func)], remainder="drop") + assert_array_equal(ct.fit_transform(X_array), X_res_first) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first) + assert callable(ct.transformers[0][2]) + assert ct.transformers_[0][2] == [0] + + +def test_column_transformer_callable_specifier_dataframe(): + # assert that function gets the full dataframe + pd = pytest.importorskip("pandas") + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_res_first = np.array([[0, 1, 2]]).T + + X_df = pd.DataFrame(X_array, columns=["first", "second"]) + + def func(X): + assert_array_equal(X.columns, X_df.columns) + assert_array_equal(X.values, X_df.values) + return ["first"] + + ct = ColumnTransformer([("trans", Trans(), func)], remainder="drop") + assert_array_equal(ct.fit_transform(X_df), X_res_first) + assert_array_equal(ct.fit(X_df).transform(X_df), X_res_first) + assert callable(ct.transformers[0][2]) + assert ct.transformers_[0][2] == ["first"] + + +def test_column_transformer_negative_column_indexes(): + X = np.random.randn(2, 2) + X_categories = np.array([[1], [2]]) + X = np.concatenate([X, X_categories], axis=1) + + ohe = OneHotEncoder() + + tf_1 = ColumnTransformer([("ohe", ohe, [-1])], remainder="passthrough") + tf_2 = ColumnTransformer([("ohe", ohe, [2])], remainder="passthrough") + assert_array_equal(tf_1.fit_transform(X), tf_2.fit_transform(X)) + + +@pytest.mark.parametrize("array_type", [np.asarray, *CSR_CONTAINERS]) +def test_column_transformer_mask_indexing(array_type): 
+ # Regression test for #14510 + # Boolean array-like does not behave as boolean array with sparse matrices. + X = np.transpose([[1, 2, 3], [4, 5, 6], [5, 6, 7], [8, 9, 10]]) + X = array_type(X) + column_transformer = ColumnTransformer( + [("identity", FunctionTransformer(), [False, True, False, True])] + ) + X_trans = column_transformer.fit_transform(X) + assert X_trans.shape == (3, 2) + + +def test_n_features_in(): + # make sure n_features_in is what is passed as input to the column + # transformer. + + X = [[1, 2], [3, 4], [5, 6]] + ct = ColumnTransformer([("a", DoubleTrans(), [0]), ("b", DoubleTrans(), [1])]) + assert not hasattr(ct, "n_features_in_") + ct.fit(X) + assert ct.n_features_in_ == 2 + + +@pytest.mark.parametrize( + "cols, pattern, include, exclude", + [ + (["col_int", "col_float"], None, np.number, None), + (["col_int", "col_float"], None, None, object), + (["col_int", "col_float"], None, [int, float], None), + (["col_str"], None, [object], None), + (["col_str"], None, object, None), + (["col_float"], None, float, None), + (["col_float"], "at$", [np.number], None), + (["col_int"], None, [int], None), + (["col_int"], "^col_int", [np.number], None), + (["col_float", "col_str"], "float|str", None, None), + (["col_str"], "^col_s", None, [int]), + ([], "str$", float, None), + (["col_int", "col_float", "col_str"], None, [np.number, object], None), + ], +) +def test_make_column_selector_with_select_dtypes(cols, pattern, include, exclude): + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame( + { + "col_int": np.array([0, 1, 2], dtype=int), + "col_float": np.array([0.0, 1.0, 2.0], dtype=float), + "col_str": ["one", "two", "three"], + }, + columns=["col_int", "col_float", "col_str"], + ) + + selector = make_column_selector( + dtype_include=include, dtype_exclude=exclude, pattern=pattern + ) + + assert_array_equal(selector(X_df), cols) + + +def test_column_transformer_with_make_column_selector(): + # Functional test for column transformer + column selector + pd = pytest.importorskip("pandas") + X_df = pd.DataFrame( + { + "col_int": np.array([0, 1, 2], dtype=int), + "col_float": np.array([0.0, 1.0, 2.0], dtype=float), + "col_cat": ["one", "two", "one"], + "col_str": ["low", "middle", "high"], + }, + columns=["col_int", "col_float", "col_cat", "col_str"], + ) + X_df["col_str"] = X_df["col_str"].astype("category") + + cat_selector = make_column_selector(dtype_include=["category", object]) + num_selector = make_column_selector(dtype_include=np.number) + + ohe = OneHotEncoder() + scaler = StandardScaler() + + ct_selector = make_column_transformer((ohe, cat_selector), (scaler, num_selector)) + ct_direct = make_column_transformer( + (ohe, ["col_cat", "col_str"]), (scaler, ["col_float", "col_int"]) + ) + + X_selector = ct_selector.fit_transform(X_df) + X_direct = ct_direct.fit_transform(X_df) + + assert_allclose(X_selector, X_direct) + + +def test_make_column_selector_error(): + selector = make_column_selector(dtype_include=np.number) + X = np.array([[0.1, 0.2]]) + msg = "make_column_selector can only be applied to pandas dataframes" + with pytest.raises(ValueError, match=msg): + selector(X) + + +def test_make_column_selector_pickle(): + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame( + { + "col_int": np.array([0, 1, 2], dtype=int), + "col_float": np.array([0.0, 1.0, 2.0], dtype=float), + "col_str": ["one", "two", "three"], + }, + columns=["col_int", "col_float", "col_str"], + ) + + selector = make_column_selector(dtype_include=[object]) + selector_picked = 
pickle.loads(pickle.dumps(selector)) + + assert_array_equal(selector(X_df), selector_picked(X_df)) + + +@pytest.mark.parametrize( + "empty_col", + [[], np.array([], dtype=int), lambda x: []], + ids=["list", "array", "callable"], +) +def test_feature_names_empty_columns(empty_col): + pd = pytest.importorskip("pandas") + + df = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]}) + + ct = ColumnTransformer( + transformers=[ + ("ohe", OneHotEncoder(), ["col1", "col2"]), + ("empty_features", OneHotEncoder(), empty_col), + ], + ) + + ct.fit(df) + assert_array_equal( + ct.get_feature_names_out(), ["ohe__col1_a", "ohe__col1_b", "ohe__col2_z"] + ) + + +@pytest.mark.parametrize( + "selector", + [ + [1], + lambda x: [1], + ["col2"], + lambda x: ["col2"], + [False, True], + lambda x: [False, True], + ], +) +def test_feature_names_out_pandas(selector): + """Checks name when selecting only the second column""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]}) + ct = ColumnTransformer([("ohe", OneHotEncoder(), selector)]) + ct.fit(df) + + assert_array_equal(ct.get_feature_names_out(), ["ohe__col2_z"]) + + +@pytest.mark.parametrize( + "selector", [[1], lambda x: [1], [False, True], lambda x: [False, True]] +) +def test_feature_names_out_non_pandas(selector): + """Checks name when selecting the second column with numpy array""" + X = [["a", "z"], ["a", "z"], ["b", "z"]] + ct = ColumnTransformer([("ohe", OneHotEncoder(), selector)]) + ct.fit(X) + + assert_array_equal(ct.get_feature_names_out(), ["ohe__x1_z"]) + + +@pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()]) +def test_sk_visual_block_remainder(remainder): + # remainder='passthrough' or an estimator will be shown in repr_html + ohe = OneHotEncoder() + ct = ColumnTransformer( + transformers=[("ohe", ohe, ["col1", "col2"])], remainder=remainder + ) + visual_block = ct._sk_visual_block_() + assert visual_block.names == ("ohe", "remainder") + assert visual_block.name_details == (["col1", "col2"], "") + assert visual_block.estimators == (ohe, remainder) + + +def test_sk_visual_block_remainder_drop(): + # remainder='drop' is not shown in repr_html + ohe = OneHotEncoder() + ct = ColumnTransformer(transformers=[("ohe", ohe, ["col1", "col2"])]) + visual_block = ct._sk_visual_block_() + assert visual_block.names == ("ohe",) + assert visual_block.name_details == (["col1", "col2"],) + assert visual_block.estimators == (ohe,) + + +@pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()]) +def test_sk_visual_block_remainder_fitted_pandas(remainder): + # Remainder shows the columns after fitting + pd = pytest.importorskip("pandas") + ohe = OneHotEncoder() + ct = ColumnTransformer( + transformers=[("ohe", ohe, ["col1", "col2"])], remainder=remainder + ) + df = pd.DataFrame( + { + "col1": ["a", "b", "c"], + "col2": ["z", "z", "z"], + "col3": [1, 2, 3], + "col4": [3, 4, 5], + } + ) + ct.fit(df) + visual_block = ct._sk_visual_block_() + assert visual_block.names == ("ohe", "remainder") + assert visual_block.name_details == (["col1", "col2"], ["col3", "col4"]) + assert visual_block.estimators == (ohe, remainder) + + +@pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()]) +def test_sk_visual_block_remainder_fitted_numpy(remainder): + # Remainder shows the indices after fitting + X = np.array([[1, 2, 3], [4, 5, 6]], dtype=float) + scaler = StandardScaler() + ct = ColumnTransformer( + transformers=[("scale", scaler, [0, 2])], remainder=remainder + ) 
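+ # X has three columns but only columns 0 and 2 are routed to the scaler, so
+ # after fitting, the visual block should report column 1 as handled by the
+ # remainder.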
+ ct.fit(X) + visual_block = ct._sk_visual_block_() + assert visual_block.names == ("scale", "remainder") + assert visual_block.name_details == ([0, 2], [1]) + assert visual_block.estimators == (scaler, remainder) + + +@pytest.mark.parametrize("explicit_colname", ["first", "second", 0, 1]) +@pytest.mark.parametrize("remainder", [Trans(), "passthrough", "drop"]) +def test_column_transformer_reordered_column_names_remainder( + explicit_colname, remainder +): + """Test the interaction between remainder and column transformer""" + pd = pytest.importorskip("pandas") + + X_fit_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_fit_df = pd.DataFrame(X_fit_array, columns=["first", "second"]) + + X_trans_array = np.array([[2, 4, 6], [0, 1, 2]]).T + X_trans_df = pd.DataFrame(X_trans_array, columns=["second", "first"]) + + tf = ColumnTransformer([("bycol", Trans(), explicit_colname)], remainder=remainder) + + tf.fit(X_fit_df) + X_fit_trans = tf.transform(X_fit_df) + + # Changing the order still works + X_trans = tf.transform(X_trans_df) + assert_allclose(X_trans, X_fit_trans) + + # extra columns are ignored + X_extended_df = X_fit_df.copy() + X_extended_df["third"] = [3, 6, 9] + X_trans = tf.transform(X_extended_df) + assert_allclose(X_trans, X_fit_trans) + + if isinstance(explicit_colname, str): + # Raise error if columns are specified by names but input only allows + # to specify by position, e.g. numpy array instead of a pandas df. + X_array = X_fit_array.copy() + err_msg = "Specifying the columns" + with pytest.raises(ValueError, match=err_msg): + tf.transform(X_array) + + +def test_feature_name_validation_missing_columns_drop_passthough(): + """Test the interaction between {'drop', 'passthrough'} and + missing column names.""" + pd = pytest.importorskip("pandas") + + X = np.ones(shape=(3, 4)) + df = pd.DataFrame(X, columns=["a", "b", "c", "d"]) + + df_dropped = df.drop("c", axis=1) + + # with remainder='passthrough', all columns seen during `fit` must be + # present + tf = ColumnTransformer([("bycol", Trans(), [1])], remainder="passthrough") + tf.fit(df) + msg = r"columns are missing: {'c'}" + with pytest.raises(ValueError, match=msg): + tf.transform(df_dropped) + + # with remainder='drop', it is allowed to have column 'c' missing + tf = ColumnTransformer([("bycol", Trans(), [1])], remainder="drop") + tf.fit(df) + + df_dropped_trans = tf.transform(df_dropped) + df_fit_trans = tf.transform(df) + assert_allclose(df_dropped_trans, df_fit_trans) + + # bycol drops 'c', thus it is allowed for 'c' to be missing + tf = ColumnTransformer([("bycol", "drop", ["c"])], remainder="passthrough") + tf.fit(df) + df_dropped_trans = tf.transform(df_dropped) + df_fit_trans = tf.transform(df) + assert_allclose(df_dropped_trans, df_fit_trans) + + +def test_feature_names_in_(): + """Feature names are stored in column transformer. + + Column transformer deliberately does not check for column name consistency. + It only checks that the non-dropped names seen in `fit` are seen + in `transform`. 
This behavior is already tested in + `test_feature_name_validation_missing_columns_drop_passthough`""" + + pd = pytest.importorskip("pandas") + + feature_names = ["a", "c", "d"] + df = pd.DataFrame([[1, 2, 3]], columns=feature_names) + ct = ColumnTransformer([("bycol", Trans(), ["a", "d"])], remainder="passthrough") + + ct.fit(df) + assert_array_equal(ct.feature_names_in_, feature_names) + assert isinstance(ct.feature_names_in_, np.ndarray) + assert ct.feature_names_in_.dtype == object + + +class TransWithNames(Trans): + def __init__(self, feature_names_out=None): + self.feature_names_out = feature_names_out + + def get_feature_names_out(self, input_features=None): + if self.feature_names_out is not None: + return np.asarray(self.feature_names_out, dtype=object) + return input_features + + +@pytest.mark.parametrize( + "transformers, remainder, expected_names", + [ + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", ["d"]), + ], + "passthrough", + ["bycol1__d", "bycol1__c", "bycol2__d", "remainder__a", "remainder__b"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", ["d"]), + ], + "drop", + ["bycol1__d", "bycol1__c", "bycol2__d"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", ["d"]), + ], + "passthrough", + ["bycol1__b", "remainder__a", "remainder__c"], + ), + ( + [ + ("bycol1", TransWithNames(["pca1", "pca2"]), ["a", "b", "d"]), + ], + "passthrough", + ["bycol1__pca1", "bycol1__pca2", "remainder__c"], + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), ["d"]), + ("bycol2", "passthrough", ["b"]), + ], + "drop", + ["bycol1__a", "bycol1__b", "bycol2__b"], + ), + ( + [ + ("bycol1", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]), + ("bycol2", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]), + ], + "passthrough", + [ + "bycol1__pca0", + "bycol1__pca1", + "bycol2__pca0", + "bycol2__pca1", + "remainder__a", + "remainder__c", + "remainder__d", + ], + ), + ( + [ + ("bycol1", "drop", ["d"]), + ], + "drop", + [], + ), + ( + [ + ("bycol1", TransWithNames(), slice(1, 3)), + ], + "drop", + ["bycol1__b", "bycol1__c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", slice(3, 4)), + ], + "passthrough", + ["bycol1__b", "remainder__a", "remainder__c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", slice(3, 4)), + ], + "passthrough", + ["bycol1__d", "bycol1__c", "bycol2__d", "remainder__a", "remainder__b"], + ), + ( + [ + ("bycol1", TransWithNames(), slice("b", "c")), + ], + "drop", + ["bycol1__b", "bycol1__c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", slice("c", "d")), + ], + "passthrough", + ["bycol1__b", "remainder__a"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", slice("c", "d")), + ], + "passthrough", + [ + "bycol1__d", + "bycol1__c", + "bycol2__c", + "bycol2__d", + "remainder__a", + "remainder__b", + ], + ), + ], +) +def test_verbose_feature_names_out_true(transformers, remainder, expected_names): + """Check feature_names_out for verbose_feature_names_out=True (default)""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"]) + ct = ColumnTransformer( + transformers, + remainder=remainder, + ) + ct.fit(df) + + names = ct.get_feature_names_out() + assert isinstance(names, np.ndarray) + assert names.dtype == object + assert_array_equal(names, expected_names) + + +@pytest.mark.parametrize( + "transformers, remainder, 
expected_names", + [ + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", ["a"]), + ], + "passthrough", + ["d", "c", "a", "b"], + ), + ( + [ + ("bycol1", TransWithNames(["a"]), ["d", "c"]), + ("bycol2", "passthrough", ["d"]), + ], + "drop", + ["a", "d"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", ["d"]), + ], + "passthrough", + ["b", "a", "c"], + ), + ( + [ + ("bycol1", TransWithNames(["pca1", "pca2"]), ["a", "b", "d"]), + ], + "passthrough", + ["pca1", "pca2", "c"], + ), + ( + [ + ("bycol1", TransWithNames(["a", "c"]), ["d"]), + ("bycol2", "passthrough", ["d"]), + ], + "drop", + ["a", "c", "d"], + ), + ( + [ + ("bycol1", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]), + ("bycol2", TransWithNames([f"kpca{i}" for i in range(2)]), ["b"]), + ], + "passthrough", + ["pca0", "pca1", "kpca0", "kpca1", "a", "c", "d"], + ), + ( + [ + ("bycol1", "drop", ["d"]), + ], + "drop", + [], + ), + ( + [ + ("bycol1", TransWithNames(), slice(1, 2)), + ("bycol2", "drop", ["d"]), + ], + "passthrough", + ["b", "a", "c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", slice(3, 4)), + ], + "passthrough", + ["b", "a", "c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", slice(0, 2)), + ], + "drop", + ["d", "c", "a", "b"], + ), + ( + [ + ("bycol1", TransWithNames(), slice("a", "b")), + ("bycol2", "drop", ["d"]), + ], + "passthrough", + ["a", "b", "c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", slice("c", "d")), + ], + "passthrough", + ["b", "a"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", slice("a", "b")), + ], + "drop", + ["d", "c", "a", "b"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", slice("b", "b")), + ], + "drop", + ["d", "c", "b"], + ), + ], +) +def test_verbose_feature_names_out_false(transformers, remainder, expected_names): + """Check feature_names_out for verbose_feature_names_out=False""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"]) + ct = ColumnTransformer( + transformers, + remainder=remainder, + verbose_feature_names_out=False, + ) + ct.fit(df) + + names = ct.get_feature_names_out() + assert isinstance(names, np.ndarray) + assert names.dtype == object + assert_array_equal(names, expected_names) + + +@pytest.mark.parametrize( + "transformers, remainder, colliding_columns", + [ + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "passthrough", ["b"]), + ], + "drop", + "['b']", + ), + ( + [ + ("bycol1", TransWithNames(["c", "d"]), ["c"]), + ("bycol2", "passthrough", ["c"]), + ], + "drop", + "['c']", + ), + ( + [ + ("bycol1", TransWithNames(["a"]), ["b"]), + ("bycol2", "passthrough", ["b"]), + ], + "passthrough", + "['a']", + ), + ( + [ + ("bycol1", TransWithNames(["a"]), ["b"]), + ("bycol2", "drop", ["b"]), + ], + "passthrough", + "['a']", + ), + ( + [ + ("bycol1", TransWithNames(["c", "b"]), ["b"]), + ("bycol2", "passthrough", ["c", "b"]), + ], + "drop", + "['b', 'c']", + ), + ( + [ + ("bycol1", TransWithNames(["a"]), ["b"]), + ("bycol2", "passthrough", ["a"]), + ("bycol3", TransWithNames(["a"]), ["b"]), + ], + "passthrough", + "['a']", + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), ["b"]), + ("bycol2", "passthrough", ["a"]), + ("bycol3", TransWithNames(["b"]), ["c"]), + ], + "passthrough", + "['a', 'b']", + ), + ( + [ + ("bycol1", TransWithNames([f"pca{i}" for i in range(6)]), ["b"]), + 
("bycol2", TransWithNames([f"pca{i}" for i in range(6)]), ["b"]), + ], + "passthrough", + "['pca0', 'pca1', 'pca2', 'pca3', 'pca4', ...]", + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), slice(1, 2)), + ("bycol2", "passthrough", ["a"]), + ("bycol3", TransWithNames(["b"]), ["c"]), + ], + "passthrough", + "['a', 'b']", + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), ["b"]), + ("bycol2", "passthrough", slice(0, 1)), + ("bycol3", TransWithNames(["b"]), ["c"]), + ], + "passthrough", + "['a', 'b']", + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), slice("b", "c")), + ("bycol2", "passthrough", ["a"]), + ("bycol3", TransWithNames(["b"]), ["c"]), + ], + "passthrough", + "['a', 'b']", + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), ["b"]), + ("bycol2", "passthrough", slice("a", "a")), + ("bycol3", TransWithNames(["b"]), ["c"]), + ], + "passthrough", + "['a', 'b']", + ), + ], +) +def test_verbose_feature_names_out_false_errors( + transformers, remainder, colliding_columns +): + """Check feature_names_out for verbose_feature_names_out=False""" + + pd = pytest.importorskip("pandas") + df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"]) + ct = ColumnTransformer( + transformers, + remainder=remainder, + verbose_feature_names_out=False, + ) + ct.fit(df) + + msg = re.escape( + f"Output feature names: {colliding_columns} are not unique. Please set " + "verbose_feature_names_out=True to add prefixes to feature names" + ) + with pytest.raises(ValueError, match=msg): + ct.get_feature_names_out() + + +@pytest.mark.parametrize("verbose_feature_names_out", [True, False]) +@pytest.mark.parametrize("remainder", ["drop", "passthrough"]) +def test_column_transformer_set_output(verbose_feature_names_out, remainder): + """Check column transformer behavior with set_output.""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"], index=[10]) + ct = ColumnTransformer( + [("first", TransWithNames(), ["a", "c"]), ("second", TransWithNames(), ["d"])], + remainder=remainder, + verbose_feature_names_out=verbose_feature_names_out, + ) + X_trans = ct.fit_transform(df) + assert isinstance(X_trans, np.ndarray) + + ct.set_output(transform="pandas") + + df_test = pd.DataFrame([[1, 2, 3, 4]], columns=df.columns, index=[20]) + X_trans = ct.transform(df_test) + assert isinstance(X_trans, pd.DataFrame) + + feature_names_out = ct.get_feature_names_out() + assert_array_equal(X_trans.columns, feature_names_out) + assert_array_equal(X_trans.index, df_test.index) + + +@pytest.mark.parametrize("remainder", ["drop", "passthrough"]) +@pytest.mark.parametrize("fit_transform", [True, False]) +def test_column_transform_set_output_mixed(remainder, fit_transform): + """Check ColumnTransformer outputs mixed types correctly.""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame( + { + "pet": pd.Series(["dog", "cat", "snake"], dtype="category"), + "color": pd.Series(["green", "blue", "red"], dtype="object"), + "age": [1.4, 2.1, 4.4], + "height": [20, 40, 10], + "distance": pd.Series([20, pd.NA, 100], dtype="Int32"), + } + ) + ct = ColumnTransformer( + [ + ( + "color_encode", + OneHotEncoder(sparse_output=False, dtype="int8"), + ["color"], + ), + ("age", StandardScaler(), ["age"]), + ], + remainder=remainder, + verbose_feature_names_out=False, + ).set_output(transform="pandas") + if fit_transform: + X_trans = ct.fit_transform(df) + else: + X_trans = ct.fit(df).transform(df) + + assert isinstance(X_trans, pd.DataFrame) + assert_array_equal(X_trans.columns, 
ct.get_feature_names_out()) + + expected_dtypes = { + "color_blue": "int8", + "color_green": "int8", + "color_red": "int8", + "age": "float64", + "pet": "category", + "height": "int64", + "distance": "Int32", + } + for col, dtype in X_trans.dtypes.items(): + assert dtype == expected_dtypes[col] + + +@pytest.mark.parametrize("remainder", ["drop", "passthrough"]) +def test_column_transform_set_output_after_fitting(remainder): + pd = pytest.importorskip("pandas") + df = pd.DataFrame( + { + "pet": pd.Series(["dog", "cat", "snake"], dtype="category"), + "age": [1.4, 2.1, 4.4], + "height": [20, 40, 10], + } + ) + ct = ColumnTransformer( + [ + ( + "color_encode", + OneHotEncoder(sparse_output=False, dtype="int16"), + ["pet"], + ), + ("age", StandardScaler(), ["age"]), + ], + remainder=remainder, + verbose_feature_names_out=False, + ) + + # fit without calling set_output + X_trans = ct.fit_transform(df) + assert isinstance(X_trans, np.ndarray) + assert X_trans.dtype == "float64" + + ct.set_output(transform="pandas") + X_trans_df = ct.transform(df) + expected_dtypes = { + "pet_cat": "int16", + "pet_dog": "int16", + "pet_snake": "int16", + "height": "int64", + "age": "float64", + } + for col, dtype in X_trans_df.dtypes.items(): + assert dtype == expected_dtypes[col] + + +# PandasOutTransformer that does not define get_feature_names_out and always expects +# the input to be a DataFrame. +class PandasOutTransformer(BaseEstimator): + def __init__(self, offset=1.0): + self.offset = offset + + def fit(self, X, y=None): + pd = pytest.importorskip("pandas") + assert isinstance(X, pd.DataFrame) + return self + + def transform(self, X, y=None): + pd = pytest.importorskip("pandas") + assert isinstance(X, pd.DataFrame) + return X - self.offset + + def set_output(self, transform=None): + # This transformer will always output a DataFrame regardless of the + # configuration. + return self + + +@pytest.mark.parametrize( + "trans_1, expected_verbose_names, expected_non_verbose_names", + [ + ( + PandasOutTransformer(offset=2.0), + ["trans_0__feat1", "trans_1__feat0"], + ["feat1", "feat0"], + ), + ( + "drop", + ["trans_0__feat1"], + ["feat1"], + ), + ( + "passthrough", + ["trans_0__feat1", "trans_1__feat0"], + ["feat1", "feat0"], + ), + ], +) +def test_transformers_with_pandas_out_but_not_feature_names_out( + trans_1, expected_verbose_names, expected_non_verbose_names +): + """Check that set_config(transform="pandas") is compatible with more transformers. + + Specifically, if transformers returns a DataFrame, but does not define + `get_feature_names_out`. + """ + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"feat0": [1.0, 2.0, 3.0], "feat1": [2.0, 3.0, 4.0]}) + ct = ColumnTransformer( + [ + ("trans_0", PandasOutTransformer(offset=3.0), ["feat1"]), + ("trans_1", trans_1, ["feat0"]), + ] + ) + X_trans_np = ct.fit_transform(X_df) + assert isinstance(X_trans_np, np.ndarray) + + # `ct` does not have `get_feature_names_out` because `PandasOutTransformer` does + # not define the method. 
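+ # Asking the ColumnTransformer for output names therefore raises, since it
+ # needs every underlying transformer to provide get_feature_names_out.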
+ with pytest.raises(AttributeError, match="not provide get_feature_names_out"): + ct.get_feature_names_out() + + # The feature names are prefixed because verbose_feature_names_out=True is default + ct.set_output(transform="pandas") + X_trans_df0 = ct.fit_transform(X_df) + assert_array_equal(X_trans_df0.columns, expected_verbose_names) + + ct.set_params(verbose_feature_names_out=False) + X_trans_df1 = ct.fit_transform(X_df) + assert_array_equal(X_trans_df1.columns, expected_non_verbose_names) + + +@pytest.mark.parametrize( + "empty_selection", + [[], np.array([False, False]), [False, False]], + ids=["list", "bool", "bool_int"], +) +def test_empty_selection_pandas_output(empty_selection): + """Check that pandas output works when there is an empty selection. + + Non-regression test for gh-25487 + """ + pd = pytest.importorskip("pandas") + + X = pd.DataFrame([[1.0, 2.2], [3.0, 1.0]], columns=["a", "b"]) + ct = ColumnTransformer( + [ + ("categorical", "passthrough", empty_selection), + ("numerical", StandardScaler(), ["a", "b"]), + ], + verbose_feature_names_out=True, + ) + ct.set_output(transform="pandas") + X_out = ct.fit_transform(X) + assert_array_equal(X_out.columns, ["numerical__a", "numerical__b"]) + + ct.set_params(verbose_feature_names_out=False) + X_out = ct.fit_transform(X) + assert_array_equal(X_out.columns, ["a", "b"]) + + +def test_raise_error_if_index_not_aligned(): + """Check column transformer raises error if indices are not aligned. + + Non-regression test for gh-26210. + """ + pd = pytest.importorskip("pandas") + + X = pd.DataFrame([[1.0, 2.2], [3.0, 1.0]], columns=["a", "b"], index=[8, 3]) + reset_index_transformer = FunctionTransformer( + lambda x: x.reset_index(drop=True), feature_names_out="one-to-one" + ) + + ct = ColumnTransformer( + [ + ("num1", "passthrough", ["a"]), + ("num2", reset_index_transformer, ["b"]), + ], + ) + ct.set_output(transform="pandas") + msg = ( + "Concatenating DataFrames from the transformer's output lead to" + " an inconsistent number of samples. The output may have Pandas" + " Indexes that do not match." + ) + with pytest.raises(ValueError, match=msg): + ct.fit_transform(X) + + +def test_remainder_set_output(): + """Check that the output is set for the remainder. + + Non-regression test for #26306. + """ + + pd = pytest.importorskip("pandas") + df = pd.DataFrame({"a": [True, False, True], "b": [1, 2, 3]}) + + ct = make_column_transformer( + (VarianceThreshold(), make_column_selector(dtype_include=bool)), + remainder=VarianceThreshold(), + verbose_feature_names_out=False, + ) + ct.set_output(transform="pandas") + + out = ct.fit_transform(df) + pd.testing.assert_frame_equal(out, df) + + ct.set_output(transform="default") + out = ct.fit_transform(df) + assert isinstance(out, np.ndarray) + + +# TODO(1.6): replace the warning by a ValueError exception +def test_transform_pd_na(): + """Check behavior when a tranformer's output contains pandas.NA + + It should emit a warning unless the output config is set to 'pandas'. 
+ """ + pd = pytest.importorskip("pandas") + if not hasattr(pd, "Float64Dtype"): + pytest.skip( + "The issue with pd.NA tested here does not happen in old versions that do" + " not have the extension dtypes" + ) + df = pd.DataFrame({"a": [1.5, None]}) + ct = make_column_transformer(("passthrough", ["a"])) + # No warning with non-extension dtypes and np.nan + with warnings.catch_warnings(): + warnings.simplefilter("error") + ct.fit_transform(df) + df = df.convert_dtypes() + # Error with extension dtype and pd.NA + with pytest.warns(FutureWarning, match=r"set_output\(transform='pandas'\)"): + ct.fit_transform(df) + # No warning when output is set to pandas + with warnings.catch_warnings(): + warnings.simplefilter("error") + ct.set_output(transform="pandas") + ct.fit_transform(df) + ct.set_output(transform="default") + # No warning when there are no pd.NA + with warnings.catch_warnings(): + warnings.simplefilter("error") + ct.fit_transform(df.fillna(-1.0)) + + +def test_dataframe_different_dataframe_libraries(): + """Check fitting and transforming on pandas and polars dataframes.""" + pd = pytest.importorskip("pandas") + pl = pytest.importorskip("polars") + X_train_np = np.array([[0, 1], [2, 4], [4, 5]]) + X_test_np = np.array([[1, 2], [1, 3], [2, 3]]) + + # Fit on pandas and transform on polars + X_train_pd = pd.DataFrame(X_train_np, columns=["a", "b"]) + X_test_pl = pl.DataFrame(X_test_np, schema=["a", "b"]) + + ct = make_column_transformer((Trans(), [0, 1])) + ct.fit(X_train_pd) + + out_pl_in = ct.transform(X_test_pl) + assert_array_equal(out_pl_in, X_test_np) + + # Fit on polars and transform on pandas + X_train_pl = pl.DataFrame(X_train_np, schema=["a", "b"]) + X_test_pd = pd.DataFrame(X_test_np, columns=["a", "b"]) + ct.fit(X_train_pl) + + out_pd_in = ct.transform(X_test_pd) + assert_array_equal(out_pd_in, X_test_np) + + +@pytest.mark.parametrize("transform_output", ["default", "pandas"]) +def test_column_transformer_remainder_passthrough_naming_consistency(transform_output): + """Check that when `remainder="passthrough"`, inconsistent naming is handled + correctly by the underlying `FunctionTransformer`. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28232 + """ + pd = pytest.importorskip("pandas") + X = pd.DataFrame(np.random.randn(10, 4)) + + preprocessor = ColumnTransformer( + transformers=[("scaler", StandardScaler(), [0, 1])], + remainder="passthrough", + ).set_output(transform=transform_output) + X_trans = preprocessor.fit_transform(X) + assert X_trans.shape == X.shape + + expected_column_names = [ + "scaler__x0", + "scaler__x1", + "remainder__x2", + "remainder__x3", + ] + if hasattr(X_trans, "columns"): + assert X_trans.columns.tolist() == expected_column_names + assert preprocessor.get_feature_names_out().tolist() == expected_column_names + + +@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"]) +def test_column_transformer_column_renaming(dataframe_lib): + """Check that we properly rename columns when using `ColumnTransformer` and + selected columns are redundant between transformers. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28260 + """ + lib = pytest.importorskip(dataframe_lib) + + df = lib.DataFrame({"x1": [1, 2, 3], "x2": [10, 20, 30], "x3": [100, 200, 300]}) + + transformer = ColumnTransformer( + transformers=[ + ("A", "passthrough", ["x1", "x2", "x3"]), + ("B", FunctionTransformer(), ["x1", "x2"]), + ("C", StandardScaler(), ["x1", "x3"]), + # special case of empty transformer + ("D", FunctionTransformer(lambda x: x[[]]), ["x1", "x2", "x3"]), + ], + verbose_feature_names_out=True, + ).set_output(transform=dataframe_lib) + df_trans = transformer.fit_transform(df) + assert list(df_trans.columns) == [ + "A__x1", + "A__x2", + "A__x3", + "B__x1", + "B__x2", + "C__x1", + "C__x3", + ] + + +@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"]) +def test_column_transformer_error_with_duplicated_columns(dataframe_lib): + """Check that we raise an error when using `ColumnTransformer` and + the columns names are duplicated between transformers.""" + lib = pytest.importorskip(dataframe_lib) + + df = lib.DataFrame({"x1": [1, 2, 3], "x2": [10, 20, 30], "x3": [100, 200, 300]}) + + transformer = ColumnTransformer( + transformers=[ + ("A", "passthrough", ["x1", "x2", "x3"]), + ("B", FunctionTransformer(), ["x1", "x2"]), + ("C", StandardScaler(), ["x1", "x3"]), + # special case of empty transformer + ("D", FunctionTransformer(lambda x: x[[]]), ["x1", "x2", "x3"]), + ], + verbose_feature_names_out=False, + ).set_output(transform=dataframe_lib) + err_msg = re.escape( + "Duplicated feature names found before concatenating the outputs of the " + "transformers: ['x1', 'x2', 'x3'].\n" + "Transformer A has conflicting columns names: ['x1', 'x2', 'x3'].\n" + "Transformer B has conflicting columns names: ['x1', 'x2'].\n" + "Transformer C has conflicting columns names: ['x1', 'x3'].\n" + ) + with pytest.raises(ValueError, match=err_msg): + transformer.fit_transform(df) + + +# Metadata Routing Tests +# ====================== + + +@pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"]) +def test_routing_passed_metadata_not_supported(method): + """Test that the right error message is raised when metadata is passed while + not supported when `enable_metadata_routing=False`.""" + + X = np.array([[0, 1, 2], [2, 4, 6]]).T + y = [1, 2, 3] + trs = ColumnTransformer([("trans", Trans(), [0])]).fit(X, y) + + with pytest.raises( + ValueError, match="is only supported if enable_metadata_routing=True" + ): + getattr(trs, method)([[1]], sample_weight=[1], prop="a") + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"]) +def test_metadata_routing_for_column_transformer(method): + """Test that metadata is routed correctly for column transformer.""" + X = np.array([[0, 1, 2], [2, 4, 6]]).T + y = [1, 2, 3] + registry = _Registry() + sample_weight, metadata = [1], "a" + trs = ColumnTransformer( + [ + ( + "trans", + ConsumingTransformer(registry=registry) + .set_fit_request(sample_weight=True, metadata=True) + .set_transform_request(sample_weight=True, metadata=True), + [0], + ) + ] + ) + + if method == "transform": + trs.fit(X, y) + trs.transform(X, sample_weight=sample_weight, metadata=metadata) + else: + getattr(trs, method)(X, y, sample_weight=sample_weight, metadata=metadata) + + assert len(registry) + for _trs in registry: + check_recorded_metadata( + obj=_trs, method=method, sample_weight=sample_weight, metadata=metadata + ) + + +@pytest.mark.usefixtures("enable_slep006") 
+def test_metadata_routing_no_fit_transform(): + """Test metadata routing when the sub-estimator doesn't implement + ``fit_transform``.""" + + class NoFitTransform(BaseEstimator): + def fit(self, X, y=None, sample_weight=None, metadata=None): + assert sample_weight + assert metadata + return self + + def transform(self, X, sample_weight=None, metadata=None): + assert sample_weight + assert metadata + return X + + X = np.array([[0, 1, 2], [2, 4, 6]]).T + y = [1, 2, 3] + _Registry() + sample_weight, metadata = [1], "a" + trs = ColumnTransformer( + [ + ( + "trans", + NoFitTransform() + .set_fit_request(sample_weight=True, metadata=True) + .set_transform_request(sample_weight=True, metadata=True), + [0], + ) + ] + ) + + trs.fit(X, y, sample_weight=sample_weight, metadata=metadata) + trs.fit_transform(X, y, sample_weight=sample_weight, metadata=metadata) + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"]) +def test_metadata_routing_error_for_column_transformer(method): + """Test that the right error is raised when metadata is not requested.""" + X = np.array([[0, 1, 2], [2, 4, 6]]).T + y = [1, 2, 3] + sample_weight, metadata = [1], "a" + trs = ColumnTransformer([("trans", ConsumingTransformer(), [0])]) + + error_message = ( + "[sample_weight, metadata] are passed but are not explicitly set as requested" + f" or not for ConsumingTransformer.{method}" + ) + with pytest.raises(ValueError, match=re.escape(error_message)): + if method == "transform": + trs.fit(X, y) + trs.transform(X, sample_weight=sample_weight, metadata=metadata) + else: + getattr(trs, method)(X, y, sample_weight=sample_weight, metadata=metadata) + + +@pytest.mark.usefixtures("enable_slep006") +def test_get_metadata_routing_works_without_fit(): + # Regression test for https://github.com/scikit-learn/scikit-learn/issues/28186 + # Make sure ct.get_metadata_routing() works w/o having called fit. + ct = ColumnTransformer([("trans", ConsumingTransformer(), [0])]) + ct.get_metadata_routing() + + +@pytest.mark.usefixtures("enable_slep006") +def test_remainder_request_always_present(): + # Test that remainder request is always present. + ct = ColumnTransformer( + [("trans", StandardScaler(), [0])], + remainder=ConsumingTransformer() + .set_fit_request(metadata=True) + .set_transform_request(metadata=True), + ) + router = ct.get_metadata_routing() + assert router.consumes("fit", ["metadata"]) == set(["metadata"]) + + +@pytest.mark.usefixtures("enable_slep006") +def test_unused_transformer_request_present(): + # Test that the request of a transformer is always present even when not + # used due to no selected columns. 
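+ # The column selector below is a callable returning an empty list, so the
+ # transformer never receives any columns, yet its metadata request must
+ # still show up in get_metadata_routing().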
+ ct = ColumnTransformer( + [ + ( + "trans", + ConsumingTransformer() + .set_fit_request(metadata=True) + .set_transform_request(metadata=True), + lambda X: [], + ) + ] + ) + router = ct.get_metadata_routing() + assert router.consumes("fit", ["metadata"]) == set(["metadata"]) + + +# End of Metadata Routing Tests +# ============================= diff --git a/venv/lib/python3.10/site-packages/sklearn/compose/tests/test_target.py b/venv/lib/python3.10/site-packages/sklearn/compose/tests/test_target.py new file mode 100644 index 0000000000000000000000000000000000000000..53242b7e0277be30a9ebc1406dd8965e6bbcd96b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/compose/tests/test_target.py @@ -0,0 +1,387 @@ +import numpy as np +import pytest + +from sklearn import datasets +from sklearn.base import BaseEstimator, TransformerMixin, clone +from sklearn.compose import TransformedTargetRegressor +from sklearn.dummy import DummyRegressor +from sklearn.linear_model import LinearRegression, OrthogonalMatchingPursuit +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import FunctionTransformer, StandardScaler +from sklearn.utils._testing import assert_allclose, assert_no_warnings + +friedman = datasets.make_friedman1(random_state=0) + + +def test_transform_target_regressor_error(): + X, y = friedman + # provide a transformer and functions at the same time + regr = TransformedTargetRegressor( + regressor=LinearRegression(), + transformer=StandardScaler(), + func=np.exp, + inverse_func=np.log, + ) + with pytest.raises( + ValueError, + match="'transformer' and functions 'func'/'inverse_func' cannot both be set.", + ): + regr.fit(X, y) + # fit with sample_weight with a regressor which does not support it + sample_weight = np.ones((y.shape[0],)) + regr = TransformedTargetRegressor( + regressor=OrthogonalMatchingPursuit(), transformer=StandardScaler() + ) + with pytest.raises( + TypeError, + match=r"fit\(\) got an unexpected " "keyword argument 'sample_weight'", + ): + regr.fit(X, y, sample_weight=sample_weight) + # func is given but inverse_func is not + regr = TransformedTargetRegressor(func=np.exp) + with pytest.raises( + ValueError, + match="When 'func' is provided, 'inverse_func' must also be provided", + ): + regr.fit(X, y) + + +def test_transform_target_regressor_invertible(): + X, y = friedman + regr = TransformedTargetRegressor( + regressor=LinearRegression(), + func=np.sqrt, + inverse_func=np.log, + check_inverse=True, + ) + with pytest.warns( + UserWarning, + match=( + "The provided functions or" + " transformer are not strictly inverse of each other." 
+ ), + ): + regr.fit(X, y) + regr = TransformedTargetRegressor( + regressor=LinearRegression(), func=np.sqrt, inverse_func=np.log + ) + regr.set_params(check_inverse=False) + assert_no_warnings(regr.fit, X, y) + + +def _check_standard_scaled(y, y_pred): + y_mean = np.mean(y, axis=0) + y_std = np.std(y, axis=0) + assert_allclose((y - y_mean) / y_std, y_pred) + + +def _check_shifted_by_one(y, y_pred): + assert_allclose(y + 1, y_pred) + + +def test_transform_target_regressor_functions(): + X, y = friedman + regr = TransformedTargetRegressor( + regressor=LinearRegression(), func=np.log, inverse_func=np.exp + ) + y_pred = regr.fit(X, y).predict(X) + # check the transformer output + y_tran = regr.transformer_.transform(y.reshape(-1, 1)).squeeze() + assert_allclose(np.log(y), y_tran) + assert_allclose( + y, regr.transformer_.inverse_transform(y_tran.reshape(-1, 1)).squeeze() + ) + assert y.shape == y_pred.shape + assert_allclose(y_pred, regr.inverse_func(regr.regressor_.predict(X))) + # check the regressor output + lr = LinearRegression().fit(X, regr.func(y)) + assert_allclose(regr.regressor_.coef_.ravel(), lr.coef_.ravel()) + + +def test_transform_target_regressor_functions_multioutput(): + X = friedman[0] + y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T + regr = TransformedTargetRegressor( + regressor=LinearRegression(), func=np.log, inverse_func=np.exp + ) + y_pred = regr.fit(X, y).predict(X) + # check the transformer output + y_tran = regr.transformer_.transform(y) + assert_allclose(np.log(y), y_tran) + assert_allclose(y, regr.transformer_.inverse_transform(y_tran)) + assert y.shape == y_pred.shape + assert_allclose(y_pred, regr.inverse_func(regr.regressor_.predict(X))) + # check the regressor output + lr = LinearRegression().fit(X, regr.func(y)) + assert_allclose(regr.regressor_.coef_.ravel(), lr.coef_.ravel()) + + +@pytest.mark.parametrize( + "X,y", [friedman, (friedman[0], np.vstack((friedman[1], friedman[1] ** 2 + 1)).T)] +) +def test_transform_target_regressor_1d_transformer(X, y): + # All transformer in scikit-learn expect 2D data. FunctionTransformer with + # validate=False lift this constraint without checking that the input is a + # 2D vector. We check the consistency of the data shape using a 1D and 2D y + # array. + transformer = FunctionTransformer( + func=lambda x: x + 1, inverse_func=lambda x: x - 1 + ) + regr = TransformedTargetRegressor( + regressor=LinearRegression(), transformer=transformer + ) + y_pred = regr.fit(X, y).predict(X) + assert y.shape == y_pred.shape + # consistency forward transform + y_tran = regr.transformer_.transform(y) + _check_shifted_by_one(y, y_tran) + assert y.shape == y_pred.shape + # consistency inverse transform + assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze()) + # consistency of the regressor + lr = LinearRegression() + transformer2 = clone(transformer) + lr.fit(X, transformer2.fit_transform(y)) + y_lr_pred = lr.predict(X) + assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred)) + assert_allclose(regr.regressor_.coef_, lr.coef_) + + +@pytest.mark.parametrize( + "X,y", [friedman, (friedman[0], np.vstack((friedman[1], friedman[1] ** 2 + 1)).T)] +) +def test_transform_target_regressor_2d_transformer(X, y): + # Check consistency with transformer accepting only 2D array and a 1D/2D y + # array. 
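+ # StandardScaler only accepts 2D input, so a 1D y is reshaped to a column
+ # vector before transforming and the result is squeezed back for comparison.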
+ transformer = StandardScaler() + regr = TransformedTargetRegressor( + regressor=LinearRegression(), transformer=transformer + ) + y_pred = regr.fit(X, y).predict(X) + assert y.shape == y_pred.shape + # consistency forward transform + if y.ndim == 1: # create a 2D array and squeeze results + y_tran = regr.transformer_.transform(y.reshape(-1, 1)) + else: + y_tran = regr.transformer_.transform(y) + _check_standard_scaled(y, y_tran.squeeze()) + assert y.shape == y_pred.shape + # consistency inverse transform + assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze()) + # consistency of the regressor + lr = LinearRegression() + transformer2 = clone(transformer) + if y.ndim == 1: # create a 2D array and squeeze results + lr.fit(X, transformer2.fit_transform(y.reshape(-1, 1)).squeeze()) + y_lr_pred = lr.predict(X).reshape(-1, 1) + y_pred2 = transformer2.inverse_transform(y_lr_pred).squeeze() + else: + lr.fit(X, transformer2.fit_transform(y)) + y_lr_pred = lr.predict(X) + y_pred2 = transformer2.inverse_transform(y_lr_pred) + + assert_allclose(y_pred, y_pred2) + assert_allclose(regr.regressor_.coef_, lr.coef_) + + +def test_transform_target_regressor_2d_transformer_multioutput(): + # Check consistency with transformer accepting only 2D array and a 2D y + # array. + X = friedman[0] + y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T + transformer = StandardScaler() + regr = TransformedTargetRegressor( + regressor=LinearRegression(), transformer=transformer + ) + y_pred = regr.fit(X, y).predict(X) + assert y.shape == y_pred.shape + # consistency forward transform + y_tran = regr.transformer_.transform(y) + _check_standard_scaled(y, y_tran) + assert y.shape == y_pred.shape + # consistency inverse transform + assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze()) + # consistency of the regressor + lr = LinearRegression() + transformer2 = clone(transformer) + lr.fit(X, transformer2.fit_transform(y)) + y_lr_pred = lr.predict(X) + assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred)) + assert_allclose(regr.regressor_.coef_, lr.coef_) + + +def test_transform_target_regressor_3d_target(): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/18866 + # Check with a 3D target with a transformer that reshapes the target + X = friedman[0] + y = np.tile(friedman[1].reshape(-1, 1, 1), [1, 3, 2]) + + def flatten_data(data): + return data.reshape(data.shape[0], -1) + + def unflatten_data(data): + return data.reshape(data.shape[0], -1, 2) + + transformer = FunctionTransformer(func=flatten_data, inverse_func=unflatten_data) + regr = TransformedTargetRegressor( + regressor=LinearRegression(), transformer=transformer + ) + y_pred = regr.fit(X, y).predict(X) + assert y.shape == y_pred.shape + + +def test_transform_target_regressor_multi_to_single(): + X = friedman[0] + y = np.transpose([friedman[1], (friedman[1] ** 2 + 1)]) + + def func(y): + out = np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2) + return out[:, np.newaxis] + + def inverse_func(y): + return y + + tt = TransformedTargetRegressor( + func=func, inverse_func=inverse_func, check_inverse=False + ) + tt.fit(X, y) + y_pred_2d_func = tt.predict(X) + assert y_pred_2d_func.shape == (100, 1) + + # force that the function only return a 1D array + def func(y): + return np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2) + + tt = TransformedTargetRegressor( + func=func, inverse_func=inverse_func, check_inverse=False + ) + tt.fit(X, y) + y_pred_1d_func = tt.predict(X) + assert y_pred_1d_func.shape == (100, 1) + + 
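+    # Whether ``func`` returns shape (n_samples,) or (n_samples, 1), the regressor's
+    # 1D predictions are reshaped to a single column before the identity
+    # ``inverse_func`` runs, so both variants should yield the same (100, 1) output.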
assert_allclose(y_pred_1d_func, y_pred_2d_func) + + +class DummyCheckerArrayTransformer(TransformerMixin, BaseEstimator): + def fit(self, X, y=None): + assert isinstance(X, np.ndarray) + return self + + def transform(self, X): + assert isinstance(X, np.ndarray) + return X + + def inverse_transform(self, X): + assert isinstance(X, np.ndarray) + return X + + +class DummyCheckerListRegressor(DummyRegressor): + def fit(self, X, y, sample_weight=None): + assert isinstance(X, list) + return super().fit(X, y, sample_weight) + + def predict(self, X): + assert isinstance(X, list) + return super().predict(X) + + +def test_transform_target_regressor_ensure_y_array(): + # check that the target ``y`` passed to the transformer will always be a + # numpy array. Similarly, if ``X`` is passed as a list, we check that the + # predictor receive as it is. + X, y = friedman + tt = TransformedTargetRegressor( + transformer=DummyCheckerArrayTransformer(), + regressor=DummyCheckerListRegressor(), + check_inverse=False, + ) + tt.fit(X.tolist(), y.tolist()) + tt.predict(X.tolist()) + with pytest.raises(AssertionError): + tt.fit(X, y.tolist()) + with pytest.raises(AssertionError): + tt.predict(X) + + +class DummyTransformer(TransformerMixin, BaseEstimator): + """Dummy transformer which count how many time fit was called.""" + + def __init__(self, fit_counter=0): + self.fit_counter = fit_counter + + def fit(self, X, y=None): + self.fit_counter += 1 + return self + + def transform(self, X): + return X + + def inverse_transform(self, X): + return X + + +@pytest.mark.parametrize("check_inverse", [False, True]) +def test_transform_target_regressor_count_fit(check_inverse): + # regression test for gh-issue #11618 + # check that we only call a single time fit for the transformer + X, y = friedman + ttr = TransformedTargetRegressor( + transformer=DummyTransformer(), check_inverse=check_inverse + ) + ttr.fit(X, y) + assert ttr.transformer_.fit_counter == 1 + + +class DummyRegressorWithExtraFitParams(DummyRegressor): + def fit(self, X, y, sample_weight=None, check_input=True): + # on the test below we force this to false, we make sure this is + # actually passed to the regressor + assert not check_input + return super().fit(X, y, sample_weight) + + +def test_transform_target_regressor_pass_fit_parameters(): + X, y = friedman + regr = TransformedTargetRegressor( + regressor=DummyRegressorWithExtraFitParams(), transformer=DummyTransformer() + ) + + regr.fit(X, y, check_input=False) + assert regr.transformer_.fit_counter == 1 + + +def test_transform_target_regressor_route_pipeline(): + X, y = friedman + + regr = TransformedTargetRegressor( + regressor=DummyRegressorWithExtraFitParams(), transformer=DummyTransformer() + ) + estimators = [("normalize", StandardScaler()), ("est", regr)] + + pip = Pipeline(estimators) + pip.fit(X, y, **{"est__check_input": False}) + + assert regr.transformer_.fit_counter == 1 + + +class DummyRegressorWithExtraPredictParams(DummyRegressor): + def predict(self, X, check_input=True): + # In the test below we make sure that the check input parameter is + # passed as false + self.predict_called = True + assert not check_input + return super().predict(X) + + +def test_transform_target_regressor_pass_extra_predict_parameters(): + # Checks that predict kwargs are passed to regressor. 
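+    # DummyRegressorWithExtraPredictParams asserts that ``check_input`` is False
+    # inside ``predict`` and records the call, so the test fails if the kwarg is
+    # not forwarded verbatim to the wrapped regressor.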
+ X, y = friedman + regr = TransformedTargetRegressor( + regressor=DummyRegressorWithExtraPredictParams(), transformer=DummyTransformer() + ) + + regr.fit(X, y) + regr.predict(X, check_input=False) + assert regr.regressor_.predict_called diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8a6a1f7075082ed986568214d6109a44f7cb1ec Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/metadata_routing_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/metadata_routing_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0c82bdb5e313288ea09b9ff275717676885d3b2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/metadata_routing_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/random_seed.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/random_seed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9149f9cfdb5ea23d7d116e6660347ca061fdcdc Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/random_seed.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..313c9b033b4bdf682bea874bdf178434a99ba0f9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_build.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_build.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84b6952c28366c2216223ee5a864f779eb5cec6b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_build.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_calibration.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_calibration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..997d7bf1d5c6f09aa8b7be7667cf2535a0bdc920 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_calibration.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_check_build.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_check_build.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..401052f1bf1fafb430ec3c3f7809fa83cfc2dc43 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_check_build.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78b0a5cb1883407fd133cef0dc44694ffbe55584 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_config.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43694239e7f119e5bc8c9126528461a4ae24ef43 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_discriminant_analysis.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_discriminant_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05d5b8598c334799f28572b24921339a2e56b24c Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_discriminant_analysis.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstring_parameters.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstring_parameters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c37bd083100d88fb6c4b828a7859c60bf91c07d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstring_parameters.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstrings.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstrings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87980eb9af3c74b4d083d0d1d7c96e181af518a8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstrings.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_dummy.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_dummy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f8aea70ab22f6f5f1a664d43ab9c7742e9a9cda Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_dummy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_init.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3747d8fbf73a0810958fdb79580a81bbab6de702 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_init.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_isotonic.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_isotonic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bb23c445467bdfa5b482c1c2af01daa1a84b4a9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_isotonic.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_approximation.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_approximation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d017462398acfa101ae0c5ff2ab5a5245810856d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_approximation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_ridge.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_ridge.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86b4e134eb8ff5b1ac3be7141dbba30da3887be9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_ridge.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metadata_routing.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metadata_routing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24481c0d32d070c4e4f79399f7257bd33153a9b2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metadata_routing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..883227b8d15bde8e74336c76d2daffcba4e210c7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators_metadata_routing.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators_metadata_routing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a7cfe70625f338e50d32d0a161a2620b6ade930 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators_metadata_routing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_min_dependencies_readme.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_min_dependencies_readme.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25806d1dd26640da1f6909b6f4503850b20dd193 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_min_dependencies_readme.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multiclass.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multiclass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e79baa93166be35324c49d7eb0c1f8b3e3d5a1e4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multiclass.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multioutput.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multioutput.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a071b0162c5512c24bd703b9472036032eef5248 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multioutput.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_naive_bayes.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_naive_bayes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b3f27367f5cf2daa18f1b9ffedf7d994fdc3935 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_naive_bayes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_pipeline.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef3d24b6b06213d052e584dbdc7122a7d3463bb5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_pipeline.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_public_functions.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_public_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3231af9196c132350014f96157c9da9b15d960e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_public_functions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_random_projection.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_random_projection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c4274b80929e6a595a6945fbb62c83b37ecd017 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_random_projection.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/metadata_routing_common.py b/venv/lib/python3.10/site-packages/sklearn/tests/metadata_routing_common.py new file mode 100644 index 0000000000000000000000000000000000000000..e330cd3960aebeb3078822432aa11a3826d30387 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/tests/metadata_routing_common.py @@ -0,0 +1,456 @@ +from functools import partial + +import numpy as np + +from sklearn.base import ( + BaseEstimator, + ClassifierMixin, + MetaEstimatorMixin, + RegressorMixin, + TransformerMixin, + clone, +) +from sklearn.metrics._scorer import _Scorer, mean_squared_error +from sklearn.model_selection import BaseCrossValidator +from sklearn.model_selection._split import GroupsConsumerMixin +from sklearn.utils._metadata_requests import ( + SIMPLE_METHODS, +) +from sklearn.utils.metadata_routing import ( + MetadataRouter, + process_routing, +) +from sklearn.utils.multiclass import _check_partial_fit_first_call + + +def record_metadata(obj, method, record_default=True, **kwargs): + """Utility function to store passed metadata to a method. + + If record_default is False, kwargs whose values are "default" are skipped. + This is so that checks on keyword arguments whose default was not changed + are skipped. 
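+
+    For instance, ``record_metadata(est, "fit", record_default=False,
+    sample_weight=None, metadata="default")`` would leave only
+    ``{"sample_weight": None}`` in ``est._records["fit"]``.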
+ + """ + if not hasattr(obj, "_records"): + obj._records = {} + if not record_default: + kwargs = { + key: val + for key, val in kwargs.items() + if not isinstance(val, str) or (val != "default") + } + obj._records[method] = kwargs + + +def check_recorded_metadata(obj, method, split_params=tuple(), **kwargs): + """Check whether the expected metadata is passed to the object's method. + + Parameters + ---------- + obj : estimator object + sub-estimator to check routed params for + method : str + sub-estimator's method where metadata is routed to + split_params : tuple, default=empty + specifies any parameters which are to be checked as being a subset + of the original values. + """ + records = getattr(obj, "_records", dict()).get(method, dict()) + assert set(kwargs.keys()) == set(records.keys()) + for key, value in kwargs.items(): + recorded_value = records[key] + # The following condition is used to check for any specified parameters + # being a subset of the original values + if key in split_params and recorded_value is not None: + assert np.isin(recorded_value, value).all() + else: + assert recorded_value is value + + +record_metadata_not_default = partial(record_metadata, record_default=False) + + +def assert_request_is_empty(metadata_request, exclude=None): + """Check if a metadata request dict is empty. + + One can exclude a method or a list of methods from the check using the + ``exclude`` parameter. If metadata_request is a MetadataRouter, then + ``exclude`` can be of the form ``{"object" : [method, ...]}``. + """ + if isinstance(metadata_request, MetadataRouter): + for name, route_mapping in metadata_request: + if exclude is not None and name in exclude: + _exclude = exclude[name] + else: + _exclude = None + assert_request_is_empty(route_mapping.router, exclude=_exclude) + return + + exclude = [] if exclude is None else exclude + for method in SIMPLE_METHODS: + if method in exclude: + continue + mmr = getattr(metadata_request, method) + props = [ + prop + for prop, alias in mmr.requests.items() + if isinstance(alias, str) or alias is not None + ] + assert not props + + +def assert_request_equal(request, dictionary): + for method, requests in dictionary.items(): + mmr = getattr(request, method) + assert mmr.requests == requests + + empty_methods = [method for method in SIMPLE_METHODS if method not in dictionary] + for method in empty_methods: + assert not len(getattr(request, method).requests) + + +class _Registry(list): + # This list is used to get a reference to the sub-estimators, which are not + # necessarily stored on the metaestimator. We need to override __deepcopy__ + # because the sub-estimators are probably cloned, which would result in a + # new copy of the list, but we need copy and deep copy both to return the + # same instance. + def __deepcopy__(self, memo): + return self + + def __copy__(self): + return self + + +class ConsumingRegressor(RegressorMixin, BaseEstimator): + """A regressor consuming metadata. + + Parameters + ---------- + registry : list, default=None + If a list, the estimator will append itself to the list in order to have + a reference to the estimator later on. Since that reference is not + required in all tests, registration can be skipped by leaving this value + as None. 
+ """ + + def __init__(self, registry=None): + self.registry = registry + + def partial_fit(self, X, y, sample_weight="default", metadata="default"): + if self.registry is not None: + self.registry.append(self) + + record_metadata_not_default( + self, "partial_fit", sample_weight=sample_weight, metadata=metadata + ) + return self + + def fit(self, X, y, sample_weight="default", metadata="default"): + if self.registry is not None: + self.registry.append(self) + + record_metadata_not_default( + self, "fit", sample_weight=sample_weight, metadata=metadata + ) + return self + + def predict(self, X, sample_weight="default", metadata="default"): + pass # pragma: no cover + + # when needed, uncomment the implementation + # record_metadata_not_default( + # self, "predict", sample_weight=sample_weight, metadata=metadata + # ) + # return np.zeros(shape=(len(X),)) + + +class NonConsumingClassifier(ClassifierMixin, BaseEstimator): + """A classifier which accepts no metadata on any method.""" + + def __init__(self, alpha=0.0): + self.alpha = alpha + + def fit(self, X, y): + self.classes_ = np.unique(y) + return self + + def partial_fit(self, X, y, classes=None): + return self + + def decision_function(self, X): + return self.predict(X) + + def predict(self, X): + return np.ones(len(X)) + + +class NonConsumingRegressor(RegressorMixin, BaseEstimator): + """A classifier which accepts no metadata on any method.""" + + def fit(self, X, y): + return self + + def partial_fit(self, X, y): + return self + + def predict(self, X): + return np.ones(len(X)) # pragma: no cover + + +class ConsumingClassifier(ClassifierMixin, BaseEstimator): + """A classifier consuming metadata. + + Parameters + ---------- + registry : list, default=None + If a list, the estimator will append itself to the list in order to have + a reference to the estimator later on. Since that reference is not + required in all tests, registration can be skipped by leaving this value + as None. + + alpha : float, default=0 + This parameter is only used to test the ``*SearchCV`` objects, and + doesn't do anything. 
+ """ + + def __init__(self, registry=None, alpha=0.0): + self.alpha = alpha + self.registry = registry + + def partial_fit( + self, X, y, classes=None, sample_weight="default", metadata="default" + ): + if self.registry is not None: + self.registry.append(self) + + record_metadata_not_default( + self, "partial_fit", sample_weight=sample_weight, metadata=metadata + ) + _check_partial_fit_first_call(self, classes) + return self + + def fit(self, X, y, sample_weight="default", metadata="default"): + if self.registry is not None: + self.registry.append(self) + + record_metadata_not_default( + self, "fit", sample_weight=sample_weight, metadata=metadata + ) + self.classes_ = np.unique(y) + return self + + def predict(self, X, sample_weight="default", metadata="default"): + record_metadata_not_default( + self, "predict", sample_weight=sample_weight, metadata=metadata + ) + return np.zeros(shape=(len(X),)) + + def predict_proba(self, X, sample_weight="default", metadata="default"): + pass # pragma: no cover + + # uncomment when needed + # record_metadata_not_default( + # self, "predict_proba", sample_weight=sample_weight, metadata=metadata + # ) + # return np.asarray([[0.0, 1.0]] * len(X)) + + def predict_log_proba(self, X, sample_weight="default", metadata="default"): + pass # pragma: no cover + + # uncomment when needed + # record_metadata_not_default( + # self, "predict_log_proba", sample_weight=sample_weight, metadata=metadata + # ) + # return np.zeros(shape=(len(X), 2)) + + def decision_function(self, X, sample_weight="default", metadata="default"): + record_metadata_not_default( + self, "predict_proba", sample_weight=sample_weight, metadata=metadata + ) + return np.zeros(shape=(len(X),)) + + +class ConsumingTransformer(TransformerMixin, BaseEstimator): + """A transformer which accepts metadata on fit and transform. + + Parameters + ---------- + registry : list, default=None + If a list, the estimator will append itself to the list in order to have + a reference to the estimator later on. Since that reference is not + required in all tests, registration can be skipped by leaving this value + as None. + """ + + def __init__(self, registry=None): + self.registry = registry + + def fit(self, X, y=None, sample_weight=None, metadata=None): + if self.registry is not None: + self.registry.append(self) + + record_metadata_not_default( + self, "fit", sample_weight=sample_weight, metadata=metadata + ) + return self + + def transform(self, X, sample_weight=None, metadata=None): + record_metadata( + self, "transform", sample_weight=sample_weight, metadata=metadata + ) + return X + + def fit_transform(self, X, y, sample_weight=None, metadata=None): + # implementing ``fit_transform`` is necessary since + # ``TransformerMixin.fit_transform`` doesn't route any metadata to + # ``transform``, while here we want ``transform`` to receive + # ``sample_weight`` and ``metadata``. 
+ record_metadata( + self, "fit_transform", sample_weight=sample_weight, metadata=metadata + ) + return self.fit(X, y, sample_weight=sample_weight, metadata=metadata).transform( + X, sample_weight=sample_weight, metadata=metadata + ) + + def inverse_transform(self, X, sample_weight=None, metadata=None): + record_metadata( + self, "inverse_transform", sample_weight=sample_weight, metadata=metadata + ) + return X + + +class ConsumingScorer(_Scorer): + def __init__(self, registry=None): + super().__init__( + score_func=mean_squared_error, sign=1, kwargs={}, response_method="predict" + ) + self.registry = registry + + def _score(self, method_caller, clf, X, y, **kwargs): + if self.registry is not None: + self.registry.append(self) + + record_metadata_not_default(self, "score", **kwargs) + + sample_weight = kwargs.get("sample_weight", None) + return super()._score(method_caller, clf, X, y, sample_weight=sample_weight) + + +class ConsumingSplitter(BaseCrossValidator, GroupsConsumerMixin): + def __init__(self, registry=None): + self.registry = registry + + def split(self, X, y=None, groups="default", metadata="default"): + if self.registry is not None: + self.registry.append(self) + + record_metadata_not_default(self, "split", groups=groups, metadata=metadata) + + split_index = len(X) // 2 + train_indices = list(range(0, split_index)) + test_indices = list(range(split_index, len(X))) + yield test_indices, train_indices + yield train_indices, test_indices + + def get_n_splits(self, X=None, y=None, groups=None, metadata=None): + return 2 + + def _iter_test_indices(self, X=None, y=None, groups=None): + split_index = len(X) // 2 + train_indices = list(range(0, split_index)) + test_indices = list(range(split_index, len(X))) + yield test_indices + yield train_indices + + +class MetaRegressor(MetaEstimatorMixin, RegressorMixin, BaseEstimator): + """A meta-regressor which is only a router.""" + + def __init__(self, estimator): + self.estimator = estimator + + def fit(self, X, y, **fit_params): + params = process_routing(self, "fit", **fit_params) + self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit) + + def get_metadata_routing(self): + router = MetadataRouter(owner=self.__class__.__name__).add( + estimator=self.estimator, method_mapping="one-to-one" + ) + return router + + +class WeightedMetaRegressor(MetaEstimatorMixin, RegressorMixin, BaseEstimator): + """A meta-regressor which is also a consumer.""" + + def __init__(self, estimator, registry=None): + self.estimator = estimator + self.registry = registry + + def fit(self, X, y, sample_weight=None, **fit_params): + if self.registry is not None: + self.registry.append(self) + + record_metadata(self, "fit", sample_weight=sample_weight) + params = process_routing(self, "fit", sample_weight=sample_weight, **fit_params) + self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit) + return self + + def predict(self, X, **predict_params): + params = process_routing(self, "predict", **predict_params) + return self.estimator_.predict(X, **params.estimator.predict) + + def get_metadata_routing(self): + router = ( + MetadataRouter(owner=self.__class__.__name__) + .add_self_request(self) + .add(estimator=self.estimator, method_mapping="one-to-one") + ) + return router + + +class WeightedMetaClassifier(MetaEstimatorMixin, ClassifierMixin, BaseEstimator): + """A meta-estimator which also consumes sample_weight itself in ``fit``.""" + + def __init__(self, estimator, registry=None): + self.estimator = estimator + self.registry = 
registry + + def fit(self, X, y, sample_weight=None, **kwargs): + if self.registry is not None: + self.registry.append(self) + + record_metadata(self, "fit", sample_weight=sample_weight) + params = process_routing(self, "fit", sample_weight=sample_weight, **kwargs) + self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit) + return self + + def get_metadata_routing(self): + router = ( + MetadataRouter(owner=self.__class__.__name__) + .add_self_request(self) + .add(estimator=self.estimator, method_mapping="fit") + ) + return router + + +class MetaTransformer(MetaEstimatorMixin, TransformerMixin, BaseEstimator): + """A simple meta-transformer.""" + + def __init__(self, transformer): + self.transformer = transformer + + def fit(self, X, y=None, **fit_params): + params = process_routing(self, "fit", **fit_params) + self.transformer_ = clone(self.transformer).fit(X, y, **params.transformer.fit) + return self + + def transform(self, X, y=None, **transform_params): + params = process_routing(self, "transform", **transform_params) + return self.transformer_.transform(X, **params.transformer.transform) + + def get_metadata_routing(self): + return MetadataRouter(owner=self.__class__.__name__).add( + transformer=self.transformer, method_mapping="one-to-one" + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/random_seed.py b/venv/lib/python3.10/site-packages/sklearn/tests/random_seed.py new file mode 100644 index 0000000000000000000000000000000000000000..0fffd57a1016d2a93abbf74579cba45c756686f4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/tests/random_seed.py @@ -0,0 +1,84 @@ +"""global_random_seed fixture + +The goal of this fixture is to prevent tests that use it to be sensitive +to a specific seed value while still being deterministic by default. + +See the documentation for the SKLEARN_TESTS_GLOBAL_RANDOM_SEED +variable for insrtuctions on how to use this fixture. + +https://scikit-learn.org/dev/computing/parallelism.html#sklearn-tests-global-random-seed +""" +from os import environ +from random import Random + +import pytest + + +# Passes the main worker's random seeds to workers +class XDistHooks: + def pytest_configure_node(self, node) -> None: + random_seeds = node.config.getoption("random_seeds") + node.workerinput["random_seeds"] = random_seeds + + +def pytest_configure(config): + if config.pluginmanager.hasplugin("xdist"): + config.pluginmanager.register(XDistHooks()) + + RANDOM_SEED_RANGE = list(range(100)) # All seeds in [0, 99] should be valid. + random_seed_var = environ.get("SKLEARN_TESTS_GLOBAL_RANDOM_SEED") + if hasattr(config, "workerinput") and "random_seeds" in config.workerinput: + # Set worker random seed from seed generated from main process + random_seeds = config.workerinput["random_seeds"] + elif random_seed_var is None: + # This is the way. + random_seeds = [42] + elif random_seed_var == "any": + # Pick-up one seed at random in the range of admissible random seeds. 
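+        # e.g. SKLEARN_TESTS_GLOBAL_RANDOM_SEED="any" might resolve to [17] on one
+        # run and [83] on the next; the picked value is echoed by
+        # pytest_report_header below so the run can be reproduced.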
+ random_seeds = [Random().choice(RANDOM_SEED_RANGE)] + elif random_seed_var == "all": + random_seeds = RANDOM_SEED_RANGE + else: + if "-" in random_seed_var: + start, stop = random_seed_var.split("-") + random_seeds = list(range(int(start), int(stop) + 1)) + else: + random_seeds = [int(random_seed_var)] + + if min(random_seeds) < 0 or max(random_seeds) > 99: + raise ValueError( + "The value(s) of the environment variable " + "SKLEARN_TESTS_GLOBAL_RANDOM_SEED must be in the range [0, 99] " + f"(or 'any' or 'all'), got: {random_seed_var}" + ) + config.option.random_seeds = random_seeds + + class GlobalRandomSeedPlugin: + @pytest.fixture(params=random_seeds) + def global_random_seed(self, request): + """Fixture to ask for a random yet controllable random seed. + + All tests that use this fixture accept the contract that they should + deterministically pass for any seed value from 0 to 99 included. + + See the documentation for the SKLEARN_TESTS_GLOBAL_RANDOM_SEED + variable for insrtuctions on how to use this fixture. + + https://scikit-learn.org/dev/computing/parallelism.html#sklearn-tests-global-random-seed + """ + yield request.param + + config.pluginmanager.register(GlobalRandomSeedPlugin()) + + +def pytest_report_header(config): + random_seed_var = environ.get("SKLEARN_TESTS_GLOBAL_RANDOM_SEED") + if random_seed_var == "any": + return [ + "To reproduce this test run, set the following environment variable:", + f' SKLEARN_TESTS_GLOBAL_RANDOM_SEED="{config.option.random_seeds[0]}"', + ( + "See: https://scikit-learn.org/dev/computing/parallelism.html" + "#sklearn-tests-global-random-seed" + ), + ] diff --git a/venv/lib/python3.10/site-packages/sklearn/tests/test_base.py b/venv/lib/python3.10/site-packages/sklearn/tests/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..3bbc236e703df6f2ae037da7ee0ea2a93f289383 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/tests/test_base.py @@ -0,0 +1,921 @@ +# Author: Gael Varoquaux +# License: BSD 3 clause + +import pickle +import re +import warnings + +import numpy as np +import pytest +import scipy.sparse as sp +from numpy.testing import assert_allclose + +import sklearn +from sklearn import config_context, datasets +from sklearn.base import ( + BaseEstimator, + OutlierMixin, + TransformerMixin, + clone, + is_classifier, +) +from sklearn.decomposition import PCA +from sklearn.exceptions import InconsistentVersionWarning +from sklearn.model_selection import GridSearchCV +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.svm import SVC +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor +from sklearn.utils._mocking import MockDataFrame +from sklearn.utils._set_output import _get_output_config +from sklearn.utils._testing import ( + _convert_container, + assert_array_equal, + assert_no_warnings, + ignore_warnings, +) + + +############################################################################# +# A few test classes +class MyEstimator(BaseEstimator): + def __init__(self, l1=0, empty=None): + self.l1 = l1 + self.empty = empty + + +class K(BaseEstimator): + def __init__(self, c=None, d=None): + self.c = c + self.d = d + + +class T(BaseEstimator): + def __init__(self, a=None, b=None): + self.a = a + self.b = b + + +class NaNTag(BaseEstimator): + def _more_tags(self): + return {"allow_nan": True} + + +class NoNaNTag(BaseEstimator): + def _more_tags(self): + return {"allow_nan": False} + + +class OverrideTag(NaNTag): + def _more_tags(self): 
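+        # _get_tags() merges _more_tags() along the MRO with the most-derived class
+        # last, so this override wins over the allow_nan=True inherited from NaNTag.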
+ return {"allow_nan": False} + + +class DiamondOverwriteTag(NaNTag, NoNaNTag): + def _more_tags(self): + return dict() + + +class InheritDiamondOverwriteTag(DiamondOverwriteTag): + pass + + +class ModifyInitParams(BaseEstimator): + """Deprecated behavior. + Equal parameters but with a type cast. + Doesn't fulfill a is a + """ + + def __init__(self, a=np.array([0])): + self.a = a.copy() + + +class Buggy(BaseEstimator): + "A buggy estimator that does not set its parameters right." + + def __init__(self, a=None): + self.a = 1 + + +class NoEstimator: + def __init__(self): + pass + + def fit(self, X=None, y=None): + return self + + def predict(self, X=None): + return None + + +class VargEstimator(BaseEstimator): + """scikit-learn estimators shouldn't have vargs.""" + + def __init__(self, *vargs): + pass + + +############################################################################# +# The tests + + +def test_clone(): + # Tests that clone creates a correct deep copy. + # We create an estimator, make a copy of its original state + # (which, in this case, is the current state of the estimator), + # and check that the obtained copy is a correct deep copy. + + from sklearn.feature_selection import SelectFpr, f_classif + + selector = SelectFpr(f_classif, alpha=0.1) + new_selector = clone(selector) + assert selector is not new_selector + assert selector.get_params() == new_selector.get_params() + + selector = SelectFpr(f_classif, alpha=np.zeros((10, 2))) + new_selector = clone(selector) + assert selector is not new_selector + + +def test_clone_2(): + # Tests that clone doesn't copy everything. + # We first create an estimator, give it an own attribute, and + # make a copy of its original state. Then we check that the copy doesn't + # have the specific attribute we manually added to the initial estimator. + + from sklearn.feature_selection import SelectFpr, f_classif + + selector = SelectFpr(f_classif, alpha=0.1) + selector.own_attribute = "test" + new_selector = clone(selector) + assert not hasattr(new_selector, "own_attribute") + + +def test_clone_buggy(): + # Check that clone raises an error on buggy estimators. 
+ buggy = Buggy() + buggy.a = 2 + with pytest.raises(RuntimeError): + clone(buggy) + + no_estimator = NoEstimator() + with pytest.raises(TypeError): + clone(no_estimator) + + varg_est = VargEstimator() + with pytest.raises(RuntimeError): + clone(varg_est) + + est = ModifyInitParams() + with pytest.raises(RuntimeError): + clone(est) + + +def test_clone_empty_array(): + # Regression test for cloning estimators with empty arrays + clf = MyEstimator(empty=np.array([])) + clf2 = clone(clf) + assert_array_equal(clf.empty, clf2.empty) + + clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]]))) + clf2 = clone(clf) + assert_array_equal(clf.empty.data, clf2.empty.data) + + +def test_clone_nan(): + # Regression test for cloning estimators with default parameter as np.nan + clf = MyEstimator(empty=np.nan) + clf2 = clone(clf) + + assert clf.empty is clf2.empty + + +def test_clone_dict(): + # test that clone creates a clone of a dict + orig = {"a": MyEstimator()} + cloned = clone(orig) + assert orig["a"] is not cloned["a"] + + +def test_clone_sparse_matrices(): + sparse_matrix_classes = [ + cls + for name in dir(sp) + if name.endswith("_matrix") and type(cls := getattr(sp, name)) is type + ] + + for cls in sparse_matrix_classes: + sparse_matrix = cls(np.eye(5)) + clf = MyEstimator(empty=sparse_matrix) + clf_cloned = clone(clf) + assert clf.empty.__class__ is clf_cloned.empty.__class__ + assert_array_equal(clf.empty.toarray(), clf_cloned.empty.toarray()) + + +def test_clone_estimator_types(): + # Check that clone works for parameters that are types rather than + # instances + clf = MyEstimator(empty=MyEstimator) + clf2 = clone(clf) + + assert clf.empty is clf2.empty + + +def test_clone_class_rather_than_instance(): + # Check that clone raises expected error message when + # cloning class rather than instance + msg = "You should provide an instance of scikit-learn estimator" + with pytest.raises(TypeError, match=msg): + clone(MyEstimator) + + +def test_repr(): + # Smoke test the repr of the base estimator. + my_estimator = MyEstimator() + repr(my_estimator) + test = T(K(), K()) + assert repr(test) == "T(a=K(), b=K())" + + some_est = T(a=["long_params"] * 1000) + assert len(repr(some_est)) == 485 + + +def test_str(): + # Smoke test the str of the base estimator + my_estimator = MyEstimator() + str(my_estimator) + + +def test_get_params(): + test = T(K(), K) + + assert "a__d" in test.get_params(deep=True) + assert "a__d" not in test.get_params(deep=False) + + test.set_params(a__d=2) + assert test.a.d == 2 + + with pytest.raises(ValueError): + test.set_params(a__a=2) + + +def test_is_classifier(): + svc = SVC() + assert is_classifier(svc) + assert is_classifier(GridSearchCV(svc, {"C": [0.1, 1]})) + assert is_classifier(Pipeline([("svc", svc)])) + assert is_classifier(Pipeline([("svc_cv", GridSearchCV(svc, {"C": [0.1, 1]}))])) + + +def test_set_params(): + # test nested estimator parameter setting + clf = Pipeline([("svc", SVC())]) + + # non-existing parameter in svc + with pytest.raises(ValueError): + clf.set_params(svc__stupid_param=True) + + # non-existing parameter of pipeline + with pytest.raises(ValueError): + clf.set_params(svm__stupid_param=True) + + # we don't currently catch if the things in pipeline are estimators + # bad_pipeline = Pipeline([("bad", NoEstimator())]) + # assert_raises(AttributeError, bad_pipeline.set_params, + # bad__stupid_param=True) + + +def test_set_params_passes_all_parameters(): + # Make sure all parameters are passed together to set_params + # of nested estimator. 
Regression test for #9944 + + class TestDecisionTree(DecisionTreeClassifier): + def set_params(self, **kwargs): + super().set_params(**kwargs) + # expected_kwargs is in test scope + assert kwargs == expected_kwargs + return self + + expected_kwargs = {"max_depth": 5, "min_samples_leaf": 2} + for est in [ + Pipeline([("estimator", TestDecisionTree())]), + GridSearchCV(TestDecisionTree(), {}), + ]: + est.set_params(estimator__max_depth=5, estimator__min_samples_leaf=2) + + +def test_set_params_updates_valid_params(): + # Check that set_params tries to set SVC().C, not + # DecisionTreeClassifier().C + gscv = GridSearchCV(DecisionTreeClassifier(), {}) + gscv.set_params(estimator=SVC(), estimator__C=42.0) + assert gscv.estimator.C == 42.0 + + +@pytest.mark.parametrize( + "tree,dataset", + [ + ( + DecisionTreeClassifier(max_depth=2, random_state=0), + datasets.make_classification(random_state=0), + ), + ( + DecisionTreeRegressor(max_depth=2, random_state=0), + datasets.make_regression(random_state=0), + ), + ], +) +def test_score_sample_weight(tree, dataset): + rng = np.random.RandomState(0) + # check that the score with and without sample weights are different + X, y = dataset + + tree.fit(X, y) + # generate random sample weights + sample_weight = rng.randint(1, 10, size=len(y)) + score_unweighted = tree.score(X, y) + score_weighted = tree.score(X, y, sample_weight=sample_weight) + msg = "Unweighted and weighted scores are unexpectedly equal" + assert score_unweighted != score_weighted, msg + + +def test_clone_pandas_dataframe(): + class DummyEstimator(TransformerMixin, BaseEstimator): + """This is a dummy class for generating numerical features + + This feature extractor extracts numerical features from pandas data + frame. + + Parameters + ---------- + + df: pandas data frame + The pandas data frame parameter. 
+ + Notes + ----- + """ + + def __init__(self, df=None, scalar_param=1): + self.df = df + self.scalar_param = scalar_param + + def fit(self, X, y=None): + pass + + def transform(self, X): + pass + + # build and clone estimator + d = np.arange(10) + df = MockDataFrame(d) + e = DummyEstimator(df, scalar_param=1) + cloned_e = clone(e) + + # the test + assert (e.df == cloned_e.df).values.all() + assert e.scalar_param == cloned_e.scalar_param + + +def test_clone_protocol(): + """Checks that clone works with `__sklearn_clone__` protocol.""" + + class FrozenEstimator(BaseEstimator): + def __init__(self, fitted_estimator): + self.fitted_estimator = fitted_estimator + + def __getattr__(self, name): + return getattr(self.fitted_estimator, name) + + def __sklearn_clone__(self): + return self + + def fit(self, *args, **kwargs): + return self + + def fit_transform(self, *args, **kwargs): + return self.fitted_estimator.transform(*args, **kwargs) + + X = np.array([[-1, -1], [-2, -1], [-3, -2]]) + pca = PCA().fit(X) + components = pca.components_ + + frozen_pca = FrozenEstimator(pca) + assert_allclose(frozen_pca.components_, components) + + # Calling PCA methods such as `get_feature_names_out` still works + assert_array_equal(frozen_pca.get_feature_names_out(), pca.get_feature_names_out()) + + # Fitting on a new data does not alter `components_` + X_new = np.asarray([[-1, 2], [3, 4], [1, 2]]) + frozen_pca.fit(X_new) + assert_allclose(frozen_pca.components_, components) + + # `fit_transform` does not alter state + frozen_pca.fit_transform(X_new) + assert_allclose(frozen_pca.components_, components) + + # Cloning estimator is a no-op + clone_frozen_pca = clone(frozen_pca) + assert clone_frozen_pca is frozen_pca + assert_allclose(clone_frozen_pca.components_, components) + + +def test_pickle_version_warning_is_not_raised_with_matching_version(): + iris = datasets.load_iris() + tree = DecisionTreeClassifier().fit(iris.data, iris.target) + tree_pickle = pickle.dumps(tree) + assert b"_sklearn_version" in tree_pickle + tree_restored = assert_no_warnings(pickle.loads, tree_pickle) + + # test that we can predict with the restored decision tree classifier + score_of_original = tree.score(iris.data, iris.target) + score_of_restored = tree_restored.score(iris.data, iris.target) + assert score_of_original == score_of_restored + + +class TreeBadVersion(DecisionTreeClassifier): + def __getstate__(self): + return dict(self.__dict__.items(), _sklearn_version="something") + + +pickle_error_message = ( + "Trying to unpickle estimator {estimator} from " + "version {old_version} when using version " + "{current_version}. This might " + "lead to breaking code or invalid results. " + "Use at your own risk." 
+) + + +def test_pickle_version_warning_is_issued_upon_different_version(): + iris = datasets.load_iris() + tree = TreeBadVersion().fit(iris.data, iris.target) + tree_pickle_other = pickle.dumps(tree) + message = pickle_error_message.format( + estimator="TreeBadVersion", + old_version="something", + current_version=sklearn.__version__, + ) + with pytest.warns(UserWarning, match=message) as warning_record: + pickle.loads(tree_pickle_other) + + message = warning_record.list[0].message + assert isinstance(message, InconsistentVersionWarning) + assert message.estimator_name == "TreeBadVersion" + assert message.original_sklearn_version == "something" + assert message.current_sklearn_version == sklearn.__version__ + + +class TreeNoVersion(DecisionTreeClassifier): + def __getstate__(self): + return self.__dict__ + + +def test_pickle_version_warning_is_issued_when_no_version_info_in_pickle(): + iris = datasets.load_iris() + # TreeNoVersion has no getstate, like pre-0.18 + tree = TreeNoVersion().fit(iris.data, iris.target) + + tree_pickle_noversion = pickle.dumps(tree) + assert b"_sklearn_version" not in tree_pickle_noversion + message = pickle_error_message.format( + estimator="TreeNoVersion", + old_version="pre-0.18", + current_version=sklearn.__version__, + ) + # check we got the warning about using pre-0.18 pickle + with pytest.warns(UserWarning, match=message): + pickle.loads(tree_pickle_noversion) + + +def test_pickle_version_no_warning_is_issued_with_non_sklearn_estimator(): + iris = datasets.load_iris() + tree = TreeNoVersion().fit(iris.data, iris.target) + tree_pickle_noversion = pickle.dumps(tree) + try: + module_backup = TreeNoVersion.__module__ + TreeNoVersion.__module__ = "notsklearn" + assert_no_warnings(pickle.loads, tree_pickle_noversion) + finally: + TreeNoVersion.__module__ = module_backup + + +class DontPickleAttributeMixin: + def __getstate__(self): + data = self.__dict__.copy() + data["_attribute_not_pickled"] = None + return data + + def __setstate__(self, state): + state["_restored"] = True + self.__dict__.update(state) + + +class MultiInheritanceEstimator(DontPickleAttributeMixin, BaseEstimator): + def __init__(self, attribute_pickled=5): + self.attribute_pickled = attribute_pickled + self._attribute_not_pickled = None + + +def test_pickling_when_getstate_is_overwritten_by_mixin(): + estimator = MultiInheritanceEstimator() + estimator._attribute_not_pickled = "this attribute should not be pickled" + + serialized = pickle.dumps(estimator) + estimator_restored = pickle.loads(serialized) + assert estimator_restored.attribute_pickled == 5 + assert estimator_restored._attribute_not_pickled is None + assert estimator_restored._restored + + +def test_pickling_when_getstate_is_overwritten_by_mixin_outside_of_sklearn(): + try: + estimator = MultiInheritanceEstimator() + text = "this attribute should not be pickled" + estimator._attribute_not_pickled = text + old_mod = type(estimator).__module__ + type(estimator).__module__ = "notsklearn" + + serialized = estimator.__getstate__() + assert serialized == {"_attribute_not_pickled": None, "attribute_pickled": 5} + + serialized["attribute_pickled"] = 4 + estimator.__setstate__(serialized) + assert estimator.attribute_pickled == 4 + assert estimator._restored + finally: + type(estimator).__module__ = old_mod + + +class SingleInheritanceEstimator(BaseEstimator): + def __init__(self, attribute_pickled=5): + self.attribute_pickled = attribute_pickled + self._attribute_not_pickled = None + + def __getstate__(self): + data = self.__dict__.copy() 
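+        # Mutate a copy so that clearing the transient attribute only affects the
+        # pickled state, not the live estimator instance.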
+ data["_attribute_not_pickled"] = None + return data + + +@ignore_warnings(category=(UserWarning)) +def test_pickling_works_when_getstate_is_overwritten_in_the_child_class(): + estimator = SingleInheritanceEstimator() + estimator._attribute_not_pickled = "this attribute should not be pickled" + + serialized = pickle.dumps(estimator) + estimator_restored = pickle.loads(serialized) + assert estimator_restored.attribute_pickled == 5 + assert estimator_restored._attribute_not_pickled is None + + +def test_tag_inheritance(): + # test that changing tags by inheritance is not allowed + + nan_tag_est = NaNTag() + no_nan_tag_est = NoNaNTag() + assert nan_tag_est._get_tags()["allow_nan"] + assert not no_nan_tag_est._get_tags()["allow_nan"] + + redefine_tags_est = OverrideTag() + assert not redefine_tags_est._get_tags()["allow_nan"] + + diamond_tag_est = DiamondOverwriteTag() + assert diamond_tag_est._get_tags()["allow_nan"] + + inherit_diamond_tag_est = InheritDiamondOverwriteTag() + assert inherit_diamond_tag_est._get_tags()["allow_nan"] + + +def test_raises_on_get_params_non_attribute(): + class MyEstimator(BaseEstimator): + def __init__(self, param=5): + pass + + def fit(self, X, y=None): + return self + + est = MyEstimator() + msg = "'MyEstimator' object has no attribute 'param'" + + with pytest.raises(AttributeError, match=msg): + est.get_params() + + +def test_repr_mimebundle_(): + # Checks the display configuration flag controls the json output + tree = DecisionTreeClassifier() + output = tree._repr_mimebundle_() + assert "text/plain" in output + assert "text/html" in output + + with config_context(display="text"): + output = tree._repr_mimebundle_() + assert "text/plain" in output + assert "text/html" not in output + + +def test_repr_html_wraps(): + # Checks the display configuration flag controls the html output + tree = DecisionTreeClassifier() + + output = tree._repr_html_() + assert "