diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0018196ffc986e82f2cc0f20c25b7d6bc13942b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/__init__.py
@@ -0,0 +1,8 @@
+"""
+The :mod:`sklearn.mixture` module implements mixture modeling algorithms.
+"""
+
+from ._bayesian_mixture import BayesianGaussianMixture
+from ._gaussian_mixture import GaussianMixture
+
+__all__ = ["GaussianMixture", "BayesianGaussianMixture"]
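The sklearn.mixture package added above exports exactly two public estimators, GaussianMixture and BayesianGaussianMixture; their shared EM machinery lives in _base.py below. As orientation for that code, here is a minimal usage sketch; the toy data, variable names and the 1e-2 weight cut-off are illustrative assumptions, not anything defined by the package.

import numpy as np
from sklearn.mixture import BayesianGaussianMixture, GaussianMixture

# Two well-separated 2-D blobs as toy data (illustrative only).
rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0.0, 1.0, size=(100, 2)),
               rng.normal(8.0, 1.0, size=(100, 2))])

# Finite mixture fitted with EM (BaseMixture.fit / fit_predict below).
gm = GaussianMixture(n_components=2, covariance_type="full", random_state=0).fit(X)
labels = gm.predict(X)        # hard component assignments
resp = gm.predict_proba(X)    # per-component responsibilities
mean_ll = gm.score(X)         # mean per-sample log-likelihood

# Variational Bayesian mixture: surplus components get weights near zero.
bgm = BayesianGaussianMixture(
    n_components=5,
    weight_concentration_prior_type="dirichlet_process",
    random_state=0,
).fit(X)
n_active = int((bgm.weights_ > 1e-2).sum())  # effectively used components

The finite model maximizes the EM lower bound directly in BaseMixture.fit_predict, while the Bayesian variant updates Dirichlet and Wishart posteriors in its _m_step, which is what allows it to switch off surplus components.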
diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/_base.py b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fb1c232c1012459a381a5e575fe643622cfcad5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/_base.py
@@ -0,0 +1,560 @@
+"""Base class for mixture models."""
+
+# Author: Wei Xue
+# Modified by Thierry Guillemot
+# License: BSD 3 clause
+
+import warnings
+from abc import ABCMeta, abstractmethod
+from numbers import Integral, Real
+from time import time
+
+import numpy as np
+from scipy.special import logsumexp
+
+from .. import cluster
+from ..base import BaseEstimator, DensityMixin, _fit_context
+from ..cluster import kmeans_plusplus
+from ..exceptions import ConvergenceWarning
+from ..utils import check_random_state
+from ..utils._param_validation import Interval, StrOptions
+from ..utils.validation import check_is_fitted
+
+
+def _check_shape(param, param_shape, name):
+    """Validate the shape of the input parameter 'param'.
+
+    Parameters
+    ----------
+    param : array
+
+    param_shape : tuple
+
+    name : str
+    """
+    param = np.array(param)
+    if param.shape != param_shape:
+        raise ValueError(
+            "The parameter '%s' should have the shape of %s, but got %s"
+            % (name, param_shape, param.shape)
+        )
+
+
+class BaseMixture(DensityMixin, BaseEstimator, metaclass=ABCMeta):
+    """Base class for mixture models.
+ + This abstract class specifies an interface for all mixture classes and + provides basic common methods for mixture models. + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0.0, None, closed="left")], + "reg_covar": [Interval(Real, 0.0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "n_init": [Interval(Integral, 1, None, closed="left")], + "init_params": [ + StrOptions({"kmeans", "random", "random_from_data", "k-means++"}) + ], + "random_state": ["random_state"], + "warm_start": ["boolean"], + "verbose": ["verbose"], + "verbose_interval": [Interval(Integral, 1, None, closed="left")], + } + + def __init__( + self, + n_components, + tol, + reg_covar, + max_iter, + n_init, + init_params, + random_state, + warm_start, + verbose, + verbose_interval, + ): + self.n_components = n_components + self.tol = tol + self.reg_covar = reg_covar + self.max_iter = max_iter + self.n_init = n_init + self.init_params = init_params + self.random_state = random_state + self.warm_start = warm_start + self.verbose = verbose + self.verbose_interval = verbose_interval + + @abstractmethod + def _check_parameters(self, X): + """Check initial parameters of the derived class. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + """ + pass + + def _initialize_parameters(self, X, random_state): + """Initialize the model parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + random_state : RandomState + A random number generator instance that controls the random seed + used for the method chosen to initialize the parameters. + """ + n_samples, _ = X.shape + + if self.init_params == "kmeans": + resp = np.zeros((n_samples, self.n_components)) + label = ( + cluster.KMeans( + n_clusters=self.n_components, n_init=1, random_state=random_state + ) + .fit(X) + .labels_ + ) + resp[np.arange(n_samples), label] = 1 + elif self.init_params == "random": + resp = random_state.uniform(size=(n_samples, self.n_components)) + resp /= resp.sum(axis=1)[:, np.newaxis] + elif self.init_params == "random_from_data": + resp = np.zeros((n_samples, self.n_components)) + indices = random_state.choice( + n_samples, size=self.n_components, replace=False + ) + resp[indices, np.arange(self.n_components)] = 1 + elif self.init_params == "k-means++": + resp = np.zeros((n_samples, self.n_components)) + _, indices = kmeans_plusplus( + X, + self.n_components, + random_state=random_state, + ) + resp[indices, np.arange(self.n_components)] = 1 + + self._initialize(X, resp) + + @abstractmethod + def _initialize(self, X, resp): + """Initialize the model parameters of the derived class. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + resp : array-like of shape (n_samples, n_components) + """ + pass + + def fit(self, X, y=None): + """Estimate model parameters with the EM algorithm. + + The method fits the model ``n_init`` times and sets the parameters with + which the model has the largest likelihood or lower bound. Within each + trial, the method iterates between E-step and M-step for ``max_iter`` + times until the change of likelihood or lower bound is less than + ``tol``, otherwise, a ``ConvergenceWarning`` is raised. + If ``warm_start`` is ``True``, then ``n_init`` is ignored and a single + initialization is performed upon the first call. Upon consecutive + calls, training starts where it left off. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + The fitted mixture. + """ + # parameters are validated in fit_predict + self.fit_predict(X, y) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_predict(self, X, y=None): + """Estimate model parameters using X and predict the labels for X. + + The method fits the model n_init times and sets the parameters with + which the model has the largest likelihood or lower bound. Within each + trial, the method iterates between E-step and M-step for `max_iter` + times until the change of likelihood or lower bound is less than + `tol`, otherwise, a :class:`~sklearn.exceptions.ConvergenceWarning` is + raised. After fitting, it predicts the most probable label for the + input data points. + + .. versionadded:: 0.20 + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + labels : array, shape (n_samples,) + Component labels. + """ + X = self._validate_data(X, dtype=[np.float64, np.float32], ensure_min_samples=2) + if X.shape[0] < self.n_components: + raise ValueError( + "Expected n_samples >= n_components " + f"but got n_components = {self.n_components}, " + f"n_samples = {X.shape[0]}" + ) + self._check_parameters(X) + + # if we enable warm_start, we will have a unique initialisation + do_init = not (self.warm_start and hasattr(self, "converged_")) + n_init = self.n_init if do_init else 1 + + max_lower_bound = -np.inf + self.converged_ = False + + random_state = check_random_state(self.random_state) + + n_samples, _ = X.shape + for init in range(n_init): + self._print_verbose_msg_init_beg(init) + + if do_init: + self._initialize_parameters(X, random_state) + + lower_bound = -np.inf if do_init else self.lower_bound_ + + if self.max_iter == 0: + best_params = self._get_parameters() + best_n_iter = 0 + else: + for n_iter in range(1, self.max_iter + 1): + prev_lower_bound = lower_bound + + log_prob_norm, log_resp = self._e_step(X) + self._m_step(X, log_resp) + lower_bound = self._compute_lower_bound(log_resp, log_prob_norm) + + change = lower_bound - prev_lower_bound + self._print_verbose_msg_iter_end(n_iter, change) + + if abs(change) < self.tol: + self.converged_ = True + break + + self._print_verbose_msg_init_end(lower_bound) + + if lower_bound > max_lower_bound or max_lower_bound == -np.inf: + max_lower_bound = lower_bound + best_params = self._get_parameters() + best_n_iter = n_iter + + # Should only warn about convergence if max_iter > 0, otherwise + # the user is assumed to have used 0-iters initialization + # to get the initial means. + if not self.converged_ and self.max_iter > 0: + warnings.warn( + "Initialization %d did not converge. " + "Try different init parameters, " + "or increase max_iter, tol " + "or check for degenerate data." % (init + 1), + ConvergenceWarning, + ) + + self._set_parameters(best_params) + self.n_iter_ = best_n_iter + self.lower_bound_ = max_lower_bound + + # Always do a final e-step to guarantee that the labels returned by + # fit_predict(X) are always consistent with fit(X).predict(X) + # for any value of max_iter and tol (and any random_state). 
+ _, log_resp = self._e_step(X) + + return log_resp.argmax(axis=1) + + def _e_step(self, X): + """E step. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + Returns + ------- + log_prob_norm : float + Mean of the logarithms of the probabilities of each sample in X + + log_responsibility : array, shape (n_samples, n_components) + Logarithm of the posterior probabilities (or responsibilities) of + the point of each sample in X. + """ + log_prob_norm, log_resp = self._estimate_log_prob_resp(X) + return np.mean(log_prob_norm), log_resp + + @abstractmethod + def _m_step(self, X, log_resp): + """M step. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + log_resp : array-like of shape (n_samples, n_components) + Logarithm of the posterior probabilities (or responsibilities) of + the point of each sample in X. + """ + pass + + @abstractmethod + def _get_parameters(self): + pass + + @abstractmethod + def _set_parameters(self, params): + pass + + def score_samples(self, X): + """Compute the log-likelihood of each sample. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + Returns + ------- + log_prob : array, shape (n_samples,) + Log-likelihood of each sample in `X` under the current model. + """ + check_is_fitted(self) + X = self._validate_data(X, reset=False) + + return logsumexp(self._estimate_weighted_log_prob(X), axis=1) + + def score(self, X, y=None): + """Compute the per-sample average log-likelihood of the given data X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_dimensions) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + log_likelihood : float + Log-likelihood of `X` under the Gaussian mixture model. + """ + return self.score_samples(X).mean() + + def predict(self, X): + """Predict the labels for the data samples in X using trained model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + Returns + ------- + labels : array, shape (n_samples,) + Component labels. + """ + check_is_fitted(self) + X = self._validate_data(X, reset=False) + return self._estimate_weighted_log_prob(X).argmax(axis=1) + + def predict_proba(self, X): + """Evaluate the components' density for each sample. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + Returns + ------- + resp : array, shape (n_samples, n_components) + Density of each Gaussian component for each sample in X. + """ + check_is_fitted(self) + X = self._validate_data(X, reset=False) + _, log_resp = self._estimate_log_prob_resp(X) + return np.exp(log_resp) + + def sample(self, n_samples=1): + """Generate random samples from the fitted Gaussian distribution. + + Parameters + ---------- + n_samples : int, default=1 + Number of samples to generate. + + Returns + ------- + X : array, shape (n_samples, n_features) + Randomly generated sample. + + y : array, shape (nsamples,) + Component labels. + """ + check_is_fitted(self) + + if n_samples < 1: + raise ValueError( + "Invalid value for 'n_samples': %d . The sampling requires at " + "least one sample." 
% (self.n_components) + ) + + _, n_features = self.means_.shape + rng = check_random_state(self.random_state) + n_samples_comp = rng.multinomial(n_samples, self.weights_) + + if self.covariance_type == "full": + X = np.vstack( + [ + rng.multivariate_normal(mean, covariance, int(sample)) + for (mean, covariance, sample) in zip( + self.means_, self.covariances_, n_samples_comp + ) + ] + ) + elif self.covariance_type == "tied": + X = np.vstack( + [ + rng.multivariate_normal(mean, self.covariances_, int(sample)) + for (mean, sample) in zip(self.means_, n_samples_comp) + ] + ) + else: + X = np.vstack( + [ + mean + + rng.standard_normal(size=(sample, n_features)) + * np.sqrt(covariance) + for (mean, covariance, sample) in zip( + self.means_, self.covariances_, n_samples_comp + ) + ] + ) + + y = np.concatenate( + [np.full(sample, j, dtype=int) for j, sample in enumerate(n_samples_comp)] + ) + + return (X, y) + + def _estimate_weighted_log_prob(self, X): + """Estimate the weighted log-probabilities, log P(X | Z) + log weights. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + Returns + ------- + weighted_log_prob : array, shape (n_samples, n_component) + """ + return self._estimate_log_prob(X) + self._estimate_log_weights() + + @abstractmethod + def _estimate_log_weights(self): + """Estimate log-weights in EM algorithm, E[ log pi ] in VB algorithm. + + Returns + ------- + log_weight : array, shape (n_components, ) + """ + pass + + @abstractmethod + def _estimate_log_prob(self, X): + """Estimate the log-probabilities log P(X | Z). + + Compute the log-probabilities per each component for each sample. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + Returns + ------- + log_prob : array, shape (n_samples, n_component) + """ + pass + + def _estimate_log_prob_resp(self, X): + """Estimate log probabilities and responsibilities for each sample. + + Compute the log probabilities, weighted log probabilities per + component and responsibilities for each sample in X with respect to + the current state of the model. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + Returns + ------- + log_prob_norm : array, shape (n_samples,) + log p(X) + + log_responsibilities : array, shape (n_samples, n_components) + logarithm of the responsibilities + """ + weighted_log_prob = self._estimate_weighted_log_prob(X) + log_prob_norm = logsumexp(weighted_log_prob, axis=1) + with np.errstate(under="ignore"): + # ignore underflow + log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis] + return log_prob_norm, log_resp + + def _print_verbose_msg_init_beg(self, n_init): + """Print verbose message on initialization.""" + if self.verbose == 1: + print("Initialization %d" % n_init) + elif self.verbose >= 2: + print("Initialization %d" % n_init) + self._init_prev_time = time() + self._iter_prev_time = self._init_prev_time + + def _print_verbose_msg_iter_end(self, n_iter, diff_ll): + """Print verbose message on initialization.""" + if n_iter % self.verbose_interval == 0: + if self.verbose == 1: + print(" Iteration %d" % n_iter) + elif self.verbose >= 2: + cur_time = time() + print( + " Iteration %d\t time lapse %.5fs\t ll change %.5f" + % (n_iter, cur_time - self._iter_prev_time, diff_ll) + ) + self._iter_prev_time = cur_time + + def _print_verbose_msg_init_end(self, ll): + """Print verbose message on the end of iteration.""" + if self.verbose == 1: + print("Initialization converged: %s" % self.converged_) + elif self.verbose >= 2: + print( + "Initialization converged: %s\t time lapse %.5fs\t ll %.5f" + % (self.converged_, time() - self._init_prev_time, ll) + ) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/_bayesian_mixture.py b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/_bayesian_mixture.py new file mode 100644 index 0000000000000000000000000000000000000000..f4169b3e1f4ee847d5963c812950e2e9273268e7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/_bayesian_mixture.py @@ -0,0 +1,888 @@ +"""Bayesian Gaussian Mixture Model.""" +# Author: Wei Xue +# Thierry Guillemot +# License: BSD 3 clause + +import math +from numbers import Real + +import numpy as np +from scipy.special import betaln, digamma, gammaln + +from ..utils import check_array +from ..utils._param_validation import Interval, StrOptions +from ._base import BaseMixture, _check_shape +from ._gaussian_mixture import ( + _check_precision_matrix, + _check_precision_positivity, + _compute_log_det_cholesky, + _compute_precision_cholesky, + _estimate_gaussian_parameters, + _estimate_log_gaussian_prob, +) + + +def _log_dirichlet_norm(dirichlet_concentration): + """Compute the log of the Dirichlet distribution normalization term. + + Parameters + ---------- + dirichlet_concentration : array-like of shape (n_samples,) + The parameters values of the Dirichlet distribution. + + Returns + ------- + log_dirichlet_norm : float + The log normalization of the Dirichlet distribution. + """ + return gammaln(np.sum(dirichlet_concentration)) - np.sum( + gammaln(dirichlet_concentration) + ) + + +def _log_wishart_norm(degrees_of_freedom, log_det_precisions_chol, n_features): + """Compute the log of the Wishart distribution normalization term. + + Parameters + ---------- + degrees_of_freedom : array-like of shape (n_components,) + The number of degrees of freedom on the covariance Wishart + distributions. + + log_det_precision_chol : array-like of shape (n_components,) + The determinant of the precision matrix for each component. + + n_features : int + The number of features. 
+ + Return + ------ + log_wishart_norm : array-like of shape (n_components,) + The log normalization of the Wishart distribution. + """ + # To simplify the computation we have removed the np.log(np.pi) term + return -( + degrees_of_freedom * log_det_precisions_chol + + degrees_of_freedom * n_features * 0.5 * math.log(2.0) + + np.sum( + gammaln(0.5 * (degrees_of_freedom - np.arange(n_features)[:, np.newaxis])), + 0, + ) + ) + + +class BayesianGaussianMixture(BaseMixture): + """Variational Bayesian estimation of a Gaussian mixture. + + This class allows to infer an approximate posterior distribution over the + parameters of a Gaussian mixture distribution. The effective number of + components can be inferred from the data. + + This class implements two types of prior for the weights distribution: a + finite mixture model with Dirichlet distribution and an infinite mixture + model with the Dirichlet Process. In practice Dirichlet Process inference + algorithm is approximated and uses a truncated distribution with a fixed + maximum number of components (called the Stick-breaking representation). + The number of components actually used almost always depends on the data. + + .. versionadded:: 0.18 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=1 + The number of mixture components. Depending on the data and the value + of the `weight_concentration_prior` the model can decide to not use + all the components by setting some component `weights_` to values very + close to zero. The number of effective components is therefore smaller + than n_components. + + covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full' + String describing the type of covariance parameters to use. + Must be one of:: + + 'full' (each component has its own general covariance matrix), + 'tied' (all components share the same general covariance matrix), + 'diag' (each component has its own diagonal covariance matrix), + 'spherical' (each component has its own single variance). + + tol : float, default=1e-3 + The convergence threshold. EM iterations will stop when the + lower bound average gain on the likelihood (of the training data with + respect to the model) is below this threshold. + + reg_covar : float, default=1e-6 + Non-negative regularization added to the diagonal of covariance. + Allows to assure that the covariance matrices are all positive. + + max_iter : int, default=100 + The number of EM iterations to perform. + + n_init : int, default=1 + The number of initializations to perform. The result with the highest + lower bound value on the likelihood is kept. + + init_params : {'kmeans', 'k-means++', 'random', 'random_from_data'}, \ + default='kmeans' + The method used to initialize the weights, the means and the + covariances. + String must be one of: + + 'kmeans' : responsibilities are initialized using kmeans. + 'k-means++' : use the k-means++ method to initialize. + 'random' : responsibilities are initialized randomly. + 'random_from_data' : initial means are randomly selected data points. + + .. versionchanged:: v1.1 + `init_params` now accepts 'random_from_data' and 'k-means++' as + initialization methods. + + weight_concentration_prior_type : {'dirichlet_process', 'dirichlet_distribution'}, \ + default='dirichlet_process' + String describing the type of the weight concentration prior. + + weight_concentration_prior : float or None, default=None + The dirichlet concentration of each component on the weight + distribution (Dirichlet). 
This is commonly called gamma in the + literature. The higher concentration puts more mass in + the center and will lead to more components being active, while a lower + concentration parameter will lead to more mass at the edge of the + mixture weights simplex. The value of the parameter must be greater + than 0. If it is None, it's set to ``1. / n_components``. + + mean_precision_prior : float or None, default=None + The precision prior on the mean distribution (Gaussian). + Controls the extent of where means can be placed. Larger + values concentrate the cluster means around `mean_prior`. + The value of the parameter must be greater than 0. + If it is None, it is set to 1. + + mean_prior : array-like, shape (n_features,), default=None + The prior on the mean distribution (Gaussian). + If it is None, it is set to the mean of X. + + degrees_of_freedom_prior : float or None, default=None + The prior of the number of degrees of freedom on the covariance + distributions (Wishart). If it is None, it's set to `n_features`. + + covariance_prior : float or array-like, default=None + The prior on the covariance distribution (Wishart). + If it is None, the emiprical covariance prior is initialized using the + covariance of X. The shape depends on `covariance_type`:: + + (n_features, n_features) if 'full', + (n_features, n_features) if 'tied', + (n_features) if 'diag', + float if 'spherical' + + random_state : int, RandomState instance or None, default=None + Controls the random seed given to the method chosen to initialize the + parameters (see `init_params`). + In addition, it controls the generation of random samples from the + fitted distribution (see the method `sample`). + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + warm_start : bool, default=False + If 'warm_start' is True, the solution of the last fitting is used as + initialization for the next call of fit(). This can speed up + convergence when fit is called several times on similar problems. + See :term:`the Glossary `. + + verbose : int, default=0 + Enable verbose output. If 1 then it prints the current + initialization and each iteration step. If greater than 1 then + it prints also the log probability and the time needed + for each step. + + verbose_interval : int, default=10 + Number of iteration done before the next print. + + Attributes + ---------- + weights_ : array-like of shape (n_components,) + The weights of each mixture components. + + means_ : array-like of shape (n_components, n_features) + The mean of each mixture component. + + covariances_ : array-like + The covariance of each mixture component. + The shape depends on `covariance_type`:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + precisions_ : array-like + The precision matrices for each component in the mixture. A precision + matrix is the inverse of a covariance matrix. A covariance matrix is + symmetric positive definite so the mixture of Gaussian can be + equivalently parameterized by the precision matrices. Storing the + precision matrices instead of the covariance matrices makes it more + efficient to compute the log-likelihood of new samples at test time. 
+ The shape depends on ``covariance_type``:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + precisions_cholesky_ : array-like + The cholesky decomposition of the precision matrices of each mixture + component. A precision matrix is the inverse of a covariance matrix. + A covariance matrix is symmetric positive definite so the mixture of + Gaussian can be equivalently parameterized by the precision matrices. + Storing the precision matrices instead of the covariance matrices makes + it more efficient to compute the log-likelihood of new samples at test + time. The shape depends on ``covariance_type``:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + converged_ : bool + True when convergence was reached in fit(), False otherwise. + + n_iter_ : int + Number of step used by the best fit of inference to reach the + convergence. + + lower_bound_ : float + Lower bound value on the model evidence (of the training data) of the + best fit of inference. + + weight_concentration_prior_ : tuple or float + The dirichlet concentration of each component on the weight + distribution (Dirichlet). The type depends on + ``weight_concentration_prior_type``:: + + (float, float) if 'dirichlet_process' (Beta parameters), + float if 'dirichlet_distribution' (Dirichlet parameters). + + The higher concentration puts more mass in + the center and will lead to more components being active, while a lower + concentration parameter will lead to more mass at the edge of the + simplex. + + weight_concentration_ : array-like of shape (n_components,) + The dirichlet concentration of each component on the weight + distribution (Dirichlet). + + mean_precision_prior_ : float + The precision prior on the mean distribution (Gaussian). + Controls the extent of where means can be placed. + Larger values concentrate the cluster means around `mean_prior`. + If mean_precision_prior is set to None, `mean_precision_prior_` is set + to 1. + + mean_precision_ : array-like of shape (n_components,) + The precision of each components on the mean distribution (Gaussian). + + mean_prior_ : array-like of shape (n_features,) + The prior on the mean distribution (Gaussian). + + degrees_of_freedom_prior_ : float + The prior of the number of degrees of freedom on the covariance + distributions (Wishart). + + degrees_of_freedom_ : array-like of shape (n_components,) + The number of degrees of freedom of each components in the model. + + covariance_prior_ : float or array-like + The prior on the covariance distribution (Wishart). + The shape depends on `covariance_type`:: + + (n_features, n_features) if 'full', + (n_features, n_features) if 'tied', + (n_features) if 'diag', + float if 'spherical' + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + GaussianMixture : Finite Gaussian mixture fit with EM. + + References + ---------- + + .. [1] `Bishop, Christopher M. (2006). "Pattern recognition and machine + learning". Vol. 4 No. 4. New York: Springer. + `_ + + .. [2] `Hagai Attias. (2000). "A Variational Bayesian Framework for + Graphical Models". 
In Advances in Neural Information Processing + Systems 12. + `_ + + .. [3] `Blei, David M. and Michael I. Jordan. (2006). "Variational + inference for Dirichlet process mixtures". Bayesian analysis 1.1 + `_ + + Examples + -------- + >>> import numpy as np + >>> from sklearn.mixture import BayesianGaussianMixture + >>> X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [12, 4], [10, 7]]) + >>> bgm = BayesianGaussianMixture(n_components=2, random_state=42).fit(X) + >>> bgm.means_ + array([[2.49... , 2.29...], + [8.45..., 4.52... ]]) + >>> bgm.predict([[0, 0], [9, 3]]) + array([0, 1]) + """ + + _parameter_constraints: dict = { + **BaseMixture._parameter_constraints, + "covariance_type": [StrOptions({"spherical", "tied", "diag", "full"})], + "weight_concentration_prior_type": [ + StrOptions({"dirichlet_process", "dirichlet_distribution"}) + ], + "weight_concentration_prior": [ + None, + Interval(Real, 0.0, None, closed="neither"), + ], + "mean_precision_prior": [None, Interval(Real, 0.0, None, closed="neither")], + "mean_prior": [None, "array-like"], + "degrees_of_freedom_prior": [None, Interval(Real, 0.0, None, closed="neither")], + "covariance_prior": [ + None, + "array-like", + Interval(Real, 0.0, None, closed="neither"), + ], + } + + def __init__( + self, + *, + n_components=1, + covariance_type="full", + tol=1e-3, + reg_covar=1e-6, + max_iter=100, + n_init=1, + init_params="kmeans", + weight_concentration_prior_type="dirichlet_process", + weight_concentration_prior=None, + mean_precision_prior=None, + mean_prior=None, + degrees_of_freedom_prior=None, + covariance_prior=None, + random_state=None, + warm_start=False, + verbose=0, + verbose_interval=10, + ): + super().__init__( + n_components=n_components, + tol=tol, + reg_covar=reg_covar, + max_iter=max_iter, + n_init=n_init, + init_params=init_params, + random_state=random_state, + warm_start=warm_start, + verbose=verbose, + verbose_interval=verbose_interval, + ) + + self.covariance_type = covariance_type + self.weight_concentration_prior_type = weight_concentration_prior_type + self.weight_concentration_prior = weight_concentration_prior + self.mean_precision_prior = mean_precision_prior + self.mean_prior = mean_prior + self.degrees_of_freedom_prior = degrees_of_freedom_prior + self.covariance_prior = covariance_prior + + def _check_parameters(self, X): + """Check that the parameters are well defined. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + """ + self._check_weights_parameters() + self._check_means_parameters(X) + self._check_precision_parameters(X) + self._checkcovariance_prior_parameter(X) + + def _check_weights_parameters(self): + """Check the parameter of the Dirichlet distribution.""" + if self.weight_concentration_prior is None: + self.weight_concentration_prior_ = 1.0 / self.n_components + else: + self.weight_concentration_prior_ = self.weight_concentration_prior + + def _check_means_parameters(self, X): + """Check the parameters of the Gaussian distribution. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + """ + _, n_features = X.shape + + if self.mean_precision_prior is None: + self.mean_precision_prior_ = 1.0 + else: + self.mean_precision_prior_ = self.mean_precision_prior + + if self.mean_prior is None: + self.mean_prior_ = X.mean(axis=0) + else: + self.mean_prior_ = check_array( + self.mean_prior, dtype=[np.float64, np.float32], ensure_2d=False + ) + _check_shape(self.mean_prior_, (n_features,), "means") + + def _check_precision_parameters(self, X): + """Check the prior parameters of the precision distribution. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + """ + _, n_features = X.shape + + if self.degrees_of_freedom_prior is None: + self.degrees_of_freedom_prior_ = n_features + elif self.degrees_of_freedom_prior > n_features - 1.0: + self.degrees_of_freedom_prior_ = self.degrees_of_freedom_prior + else: + raise ValueError( + "The parameter 'degrees_of_freedom_prior' " + "should be greater than %d, but got %.3f." + % (n_features - 1, self.degrees_of_freedom_prior) + ) + + def _checkcovariance_prior_parameter(self, X): + """Check the `covariance_prior_`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + """ + _, n_features = X.shape + + if self.covariance_prior is None: + self.covariance_prior_ = { + "full": np.atleast_2d(np.cov(X.T)), + "tied": np.atleast_2d(np.cov(X.T)), + "diag": np.var(X, axis=0, ddof=1), + "spherical": np.var(X, axis=0, ddof=1).mean(), + }[self.covariance_type] + + elif self.covariance_type in ["full", "tied"]: + self.covariance_prior_ = check_array( + self.covariance_prior, dtype=[np.float64, np.float32], ensure_2d=False + ) + _check_shape( + self.covariance_prior_, + (n_features, n_features), + "%s covariance_prior" % self.covariance_type, + ) + _check_precision_matrix(self.covariance_prior_, self.covariance_type) + elif self.covariance_type == "diag": + self.covariance_prior_ = check_array( + self.covariance_prior, dtype=[np.float64, np.float32], ensure_2d=False + ) + _check_shape( + self.covariance_prior_, + (n_features,), + "%s covariance_prior" % self.covariance_type, + ) + _check_precision_positivity(self.covariance_prior_, self.covariance_type) + # spherical case + else: + self.covariance_prior_ = self.covariance_prior + + def _initialize(self, X, resp): + """Initialization of the mixture parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + resp : array-like of shape (n_samples, n_components) + """ + nk, xk, sk = _estimate_gaussian_parameters( + X, resp, self.reg_covar, self.covariance_type + ) + + self._estimate_weights(nk) + self._estimate_means(nk, xk) + self._estimate_precisions(nk, xk, sk) + + def _estimate_weights(self, nk): + """Estimate the parameters of the Dirichlet distribution. + + Parameters + ---------- + nk : array-like of shape (n_components,) + """ + if self.weight_concentration_prior_type == "dirichlet_process": + # For dirichlet process weight_concentration will be a tuple + # containing the two parameters of the beta distribution + self.weight_concentration_ = ( + 1.0 + nk, + ( + self.weight_concentration_prior_ + + np.hstack((np.cumsum(nk[::-1])[-2::-1], 0)) + ), + ) + else: + # case Variational Gaussian mixture with dirichlet distribution + self.weight_concentration_ = self.weight_concentration_prior_ + nk + + def _estimate_means(self, nk, xk): + """Estimate the parameters of the Gaussian distribution. 
+ + Parameters + ---------- + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + """ + self.mean_precision_ = self.mean_precision_prior_ + nk + self.means_ = ( + self.mean_precision_prior_ * self.mean_prior_ + nk[:, np.newaxis] * xk + ) / self.mean_precision_[:, np.newaxis] + + def _estimate_precisions(self, nk, xk, sk): + """Estimate the precisions parameters of the precision distribution. + + Parameters + ---------- + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + + sk : array-like + The shape depends of `covariance_type`: + 'full' : (n_components, n_features, n_features) + 'tied' : (n_features, n_features) + 'diag' : (n_components, n_features) + 'spherical' : (n_components,) + """ + { + "full": self._estimate_wishart_full, + "tied": self._estimate_wishart_tied, + "diag": self._estimate_wishart_diag, + "spherical": self._estimate_wishart_spherical, + }[self.covariance_type](nk, xk, sk) + + self.precisions_cholesky_ = _compute_precision_cholesky( + self.covariances_, self.covariance_type + ) + + def _estimate_wishart_full(self, nk, xk, sk): + """Estimate the full Wishart distribution parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + + sk : array-like of shape (n_components, n_features, n_features) + """ + _, n_features = xk.shape + + # Warning : in some Bishop book, there is a typo on the formula 10.63 + # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` is + # the correct formula + self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk + + self.covariances_ = np.empty((self.n_components, n_features, n_features)) + + for k in range(self.n_components): + diff = xk[k] - self.mean_prior_ + self.covariances_[k] = ( + self.covariance_prior_ + + nk[k] * sk[k] + + nk[k] + * self.mean_precision_prior_ + / self.mean_precision_[k] + * np.outer(diff, diff) + ) + + # Contrary to the original bishop book, we normalize the covariances + self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis, np.newaxis] + + def _estimate_wishart_tied(self, nk, xk, sk): + """Estimate the tied Wishart distribution parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + + sk : array-like of shape (n_features, n_features) + """ + _, n_features = xk.shape + + # Warning : in some Bishop book, there is a typo on the formula 10.63 + # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` + # is the correct formula + self.degrees_of_freedom_ = ( + self.degrees_of_freedom_prior_ + nk.sum() / self.n_components + ) + + diff = xk - self.mean_prior_ + self.covariances_ = ( + self.covariance_prior_ + + sk * nk.sum() / self.n_components + + self.mean_precision_prior_ + / self.n_components + * np.dot((nk / self.mean_precision_) * diff.T, diff) + ) + + # Contrary to the original bishop book, we normalize the covariances + self.covariances_ /= self.degrees_of_freedom_ + + def _estimate_wishart_diag(self, nk, xk, sk): + """Estimate the diag Wishart distribution parameters. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + + sk : array-like of shape (n_components, n_features) + """ + _, n_features = xk.shape + + # Warning : in some Bishop book, there is a typo on the formula 10.63 + # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` + # is the correct formula + self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk + + diff = xk - self.mean_prior_ + self.covariances_ = self.covariance_prior_ + nk[:, np.newaxis] * ( + sk + + (self.mean_precision_prior_ / self.mean_precision_)[:, np.newaxis] + * np.square(diff) + ) + + # Contrary to the original bishop book, we normalize the covariances + self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis] + + def _estimate_wishart_spherical(self, nk, xk, sk): + """Estimate the spherical Wishart distribution parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + + sk : array-like of shape (n_components,) + """ + _, n_features = xk.shape + + # Warning : in some Bishop book, there is a typo on the formula 10.63 + # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` + # is the correct formula + self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk + + diff = xk - self.mean_prior_ + self.covariances_ = self.covariance_prior_ + nk * ( + sk + + self.mean_precision_prior_ + / self.mean_precision_ + * np.mean(np.square(diff), 1) + ) + + # Contrary to the original bishop book, we normalize the covariances + self.covariances_ /= self.degrees_of_freedom_ + + def _m_step(self, X, log_resp): + """M step. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + log_resp : array-like of shape (n_samples, n_components) + Logarithm of the posterior probabilities (or responsibilities) of + the point of each sample in X. + """ + n_samples, _ = X.shape + + nk, xk, sk = _estimate_gaussian_parameters( + X, np.exp(log_resp), self.reg_covar, self.covariance_type + ) + self._estimate_weights(nk) + self._estimate_means(nk, xk) + self._estimate_precisions(nk, xk, sk) + + def _estimate_log_weights(self): + if self.weight_concentration_prior_type == "dirichlet_process": + digamma_sum = digamma( + self.weight_concentration_[0] + self.weight_concentration_[1] + ) + digamma_a = digamma(self.weight_concentration_[0]) + digamma_b = digamma(self.weight_concentration_[1]) + return ( + digamma_a + - digamma_sum + + np.hstack((0, np.cumsum(digamma_b - digamma_sum)[:-1])) + ) + else: + # case Variational Gaussian mixture with dirichlet distribution + return digamma(self.weight_concentration_) - digamma( + np.sum(self.weight_concentration_) + ) + + def _estimate_log_prob(self, X): + _, n_features = X.shape + # We remove `n_features * np.log(self.degrees_of_freedom_)` because + # the precision matrix is normalized + log_gauss = _estimate_log_gaussian_prob( + X, self.means_, self.precisions_cholesky_, self.covariance_type + ) - 0.5 * n_features * np.log(self.degrees_of_freedom_) + + log_lambda = n_features * np.log(2.0) + np.sum( + digamma( + 0.5 + * (self.degrees_of_freedom_ - np.arange(0, n_features)[:, np.newaxis]) + ), + 0, + ) + + return log_gauss + 0.5 * (log_lambda - n_features / self.mean_precision_) + + def _compute_lower_bound(self, log_resp, log_prob_norm): + """Estimate the lower bound of the model. 
+ + The lower bound on the likelihood (of the training data with respect to + the model) is used to detect the convergence and has to increase at + each iteration. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + log_resp : array, shape (n_samples, n_components) + Logarithm of the posterior probabilities (or responsibilities) of + the point of each sample in X. + + log_prob_norm : float + Logarithm of the probability of each sample in X. + + Returns + ------- + lower_bound : float + """ + # Contrary to the original formula, we have done some simplification + # and removed all the constant terms. + (n_features,) = self.mean_prior_.shape + + # We removed `.5 * n_features * np.log(self.degrees_of_freedom_)` + # because the precision matrix is normalized. + log_det_precisions_chol = _compute_log_det_cholesky( + self.precisions_cholesky_, self.covariance_type, n_features + ) - 0.5 * n_features * np.log(self.degrees_of_freedom_) + + if self.covariance_type == "tied": + log_wishart = self.n_components * np.float64( + _log_wishart_norm( + self.degrees_of_freedom_, log_det_precisions_chol, n_features + ) + ) + else: + log_wishart = np.sum( + _log_wishart_norm( + self.degrees_of_freedom_, log_det_precisions_chol, n_features + ) + ) + + if self.weight_concentration_prior_type == "dirichlet_process": + log_norm_weight = -np.sum( + betaln(self.weight_concentration_[0], self.weight_concentration_[1]) + ) + else: + log_norm_weight = _log_dirichlet_norm(self.weight_concentration_) + + return ( + -np.sum(np.exp(log_resp) * log_resp) + - log_wishart + - log_norm_weight + - 0.5 * n_features * np.sum(np.log(self.mean_precision_)) + ) + + def _get_parameters(self): + return ( + self.weight_concentration_, + self.mean_precision_, + self.means_, + self.degrees_of_freedom_, + self.covariances_, + self.precisions_cholesky_, + ) + + def _set_parameters(self, params): + ( + self.weight_concentration_, + self.mean_precision_, + self.means_, + self.degrees_of_freedom_, + self.covariances_, + self.precisions_cholesky_, + ) = params + + # Weights computation + if self.weight_concentration_prior_type == "dirichlet_process": + weight_dirichlet_sum = ( + self.weight_concentration_[0] + self.weight_concentration_[1] + ) + tmp = self.weight_concentration_[1] / weight_dirichlet_sum + self.weights_ = ( + self.weight_concentration_[0] + / weight_dirichlet_sum + * np.hstack((1, np.cumprod(tmp[:-1]))) + ) + self.weights_ /= np.sum(self.weights_) + else: + self.weights_ = self.weight_concentration_ / np.sum( + self.weight_concentration_ + ) + + # Precisions matrices computation + if self.covariance_type == "full": + self.precisions_ = np.array( + [ + np.dot(prec_chol, prec_chol.T) + for prec_chol in self.precisions_cholesky_ + ] + ) + + elif self.covariance_type == "tied": + self.precisions_ = np.dot( + self.precisions_cholesky_, self.precisions_cholesky_.T + ) + else: + self.precisions_ = self.precisions_cholesky_**2 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/_gaussian_mixture.py b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/_gaussian_mixture.py new file mode 100644 index 0000000000000000000000000000000000000000..09e3674a6779fce1a6270c44af09bc014fcc29b7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/_gaussian_mixture.py @@ -0,0 +1,912 @@ +"""Gaussian Mixture Model.""" + +# Author: Wei Xue +# Modified by Thierry Guillemot +# License: BSD 3 clause + +import numpy as np +from scipy import linalg + +from ..utils import check_array 
+from ..utils._param_validation import StrOptions +from ..utils.extmath import row_norms +from ._base import BaseMixture, _check_shape + +############################################################################### +# Gaussian mixture shape checkers used by the GaussianMixture class + + +def _check_weights(weights, n_components): + """Check the user provided 'weights'. + + Parameters + ---------- + weights : array-like of shape (n_components,) + The proportions of components of each mixture. + + n_components : int + Number of components. + + Returns + ------- + weights : array, shape (n_components,) + """ + weights = check_array(weights, dtype=[np.float64, np.float32], ensure_2d=False) + _check_shape(weights, (n_components,), "weights") + + # check range + if any(np.less(weights, 0.0)) or any(np.greater(weights, 1.0)): + raise ValueError( + "The parameter 'weights' should be in the range " + "[0, 1], but got max value %.5f, min value %.5f" + % (np.min(weights), np.max(weights)) + ) + + # check normalization + if not np.allclose(np.abs(1.0 - np.sum(weights)), 0.0): + raise ValueError( + "The parameter 'weights' should be normalized, but got sum(weights) = %.5f" + % np.sum(weights) + ) + return weights + + +def _check_means(means, n_components, n_features): + """Validate the provided 'means'. + + Parameters + ---------- + means : array-like of shape (n_components, n_features) + The centers of the current components. + + n_components : int + Number of components. + + n_features : int + Number of features. + + Returns + ------- + means : array, (n_components, n_features) + """ + means = check_array(means, dtype=[np.float64, np.float32], ensure_2d=False) + _check_shape(means, (n_components, n_features), "means") + return means + + +def _check_precision_positivity(precision, covariance_type): + """Check a precision vector is positive-definite.""" + if np.any(np.less_equal(precision, 0.0)): + raise ValueError("'%s precision' should be positive" % covariance_type) + + +def _check_precision_matrix(precision, covariance_type): + """Check a precision matrix is symmetric and positive-definite.""" + if not ( + np.allclose(precision, precision.T) and np.all(linalg.eigvalsh(precision) > 0.0) + ): + raise ValueError( + "'%s precision' should be symmetric, positive-definite" % covariance_type + ) + + +def _check_precisions_full(precisions, covariance_type): + """Check the precision matrices are symmetric and positive-definite.""" + for prec in precisions: + _check_precision_matrix(prec, covariance_type) + + +def _check_precisions(precisions, covariance_type, n_components, n_features): + """Validate user provided precisions. + + Parameters + ---------- + precisions : array-like + 'full' : shape of (n_components, n_features, n_features) + 'tied' : shape of (n_features, n_features) + 'diag' : shape of (n_components, n_features) + 'spherical' : shape of (n_components,) + + covariance_type : str + + n_components : int + Number of components. + + n_features : int + Number of features. 
+ + Returns + ------- + precisions : array + """ + precisions = check_array( + precisions, + dtype=[np.float64, np.float32], + ensure_2d=False, + allow_nd=covariance_type == "full", + ) + + precisions_shape = { + "full": (n_components, n_features, n_features), + "tied": (n_features, n_features), + "diag": (n_components, n_features), + "spherical": (n_components,), + } + _check_shape( + precisions, precisions_shape[covariance_type], "%s precision" % covariance_type + ) + + _check_precisions = { + "full": _check_precisions_full, + "tied": _check_precision_matrix, + "diag": _check_precision_positivity, + "spherical": _check_precision_positivity, + } + _check_precisions[covariance_type](precisions, covariance_type) + return precisions + + +############################################################################### +# Gaussian mixture parameters estimators (used by the M-Step) + + +def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar): + """Estimate the full covariance matrices. + + Parameters + ---------- + resp : array-like of shape (n_samples, n_components) + + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + means : array-like of shape (n_components, n_features) + + reg_covar : float + + Returns + ------- + covariances : array, shape (n_components, n_features, n_features) + The covariance matrix of the current components. + """ + n_components, n_features = means.shape + covariances = np.empty((n_components, n_features, n_features)) + for k in range(n_components): + diff = X - means[k] + covariances[k] = np.dot(resp[:, k] * diff.T, diff) / nk[k] + covariances[k].flat[:: n_features + 1] += reg_covar + return covariances + + +def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar): + """Estimate the tied covariance matrix. + + Parameters + ---------- + resp : array-like of shape (n_samples, n_components) + + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + means : array-like of shape (n_components, n_features) + + reg_covar : float + + Returns + ------- + covariance : array, shape (n_features, n_features) + The tied covariance matrix of the components. + """ + avg_X2 = np.dot(X.T, X) + avg_means2 = np.dot(nk * means.T, means) + covariance = avg_X2 - avg_means2 + covariance /= nk.sum() + covariance.flat[:: len(covariance) + 1] += reg_covar + return covariance + + +def _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar): + """Estimate the diagonal covariance vectors. + + Parameters + ---------- + responsibilities : array-like of shape (n_samples, n_components) + + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + means : array-like of shape (n_components, n_features) + + reg_covar : float + + Returns + ------- + covariances : array, shape (n_components, n_features) + The covariance vector of the current components. + """ + avg_X2 = np.dot(resp.T, X * X) / nk[:, np.newaxis] + avg_means2 = means**2 + avg_X_means = means * np.dot(resp.T, X) / nk[:, np.newaxis] + return avg_X2 - 2 * avg_X_means + avg_means2 + reg_covar + + +def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar): + """Estimate the spherical variance values. 
+ + Parameters + ---------- + responsibilities : array-like of shape (n_samples, n_components) + + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + means : array-like of shape (n_components, n_features) + + reg_covar : float + + Returns + ------- + variances : array, shape (n_components,) + The variance values of each components. + """ + return _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar).mean(1) + + +def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type): + """Estimate the Gaussian distribution parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input data array. + + resp : array-like of shape (n_samples, n_components) + The responsibilities for each data sample in X. + + reg_covar : float + The regularization added to the diagonal of the covariance matrices. + + covariance_type : {'full', 'tied', 'diag', 'spherical'} + The type of precision matrices. + + Returns + ------- + nk : array-like of shape (n_components,) + The numbers of data samples in the current components. + + means : array-like of shape (n_components, n_features) + The centers of the current components. + + covariances : array-like + The covariance matrix of the current components. + The shape depends of the covariance_type. + """ + nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps + means = np.dot(resp.T, X) / nk[:, np.newaxis] + covariances = { + "full": _estimate_gaussian_covariances_full, + "tied": _estimate_gaussian_covariances_tied, + "diag": _estimate_gaussian_covariances_diag, + "spherical": _estimate_gaussian_covariances_spherical, + }[covariance_type](resp, X, nk, means, reg_covar) + return nk, means, covariances + + +def _compute_precision_cholesky(covariances, covariance_type): + """Compute the Cholesky decomposition of the precisions. + + Parameters + ---------- + covariances : array-like + The covariance matrix of the current components. + The shape depends of the covariance_type. + + covariance_type : {'full', 'tied', 'diag', 'spherical'} + The type of precision matrices. + + Returns + ------- + precisions_cholesky : array-like + The cholesky decomposition of sample precisions of the current + components. The shape depends of the covariance_type. + """ + estimate_precision_error_message = ( + "Fitting the mixture model failed because some components have " + "ill-defined empirical covariance (for instance caused by singleton " + "or collapsed samples). Try to decrease the number of components, " + "or increase reg_covar." 
+ ) + + if covariance_type == "full": + n_components, n_features, _ = covariances.shape + precisions_chol = np.empty((n_components, n_features, n_features)) + for k, covariance in enumerate(covariances): + try: + cov_chol = linalg.cholesky(covariance, lower=True) + except linalg.LinAlgError: + raise ValueError(estimate_precision_error_message) + precisions_chol[k] = linalg.solve_triangular( + cov_chol, np.eye(n_features), lower=True + ).T + elif covariance_type == "tied": + _, n_features = covariances.shape + try: + cov_chol = linalg.cholesky(covariances, lower=True) + except linalg.LinAlgError: + raise ValueError(estimate_precision_error_message) + precisions_chol = linalg.solve_triangular( + cov_chol, np.eye(n_features), lower=True + ).T + else: + if np.any(np.less_equal(covariances, 0.0)): + raise ValueError(estimate_precision_error_message) + precisions_chol = 1.0 / np.sqrt(covariances) + return precisions_chol + + +def _flipudlr(array): + """Reverse the rows and columns of an array.""" + return np.flipud(np.fliplr(array)) + + +def _compute_precision_cholesky_from_precisions(precisions, covariance_type): + r"""Compute the Cholesky decomposition of precisions using precisions themselves. + + As implemented in :func:`_compute_precision_cholesky`, the `precisions_cholesky_` is + an upper-triangular matrix for each Gaussian component, which can be expressed as + the $UU^T$ factorization of the precision matrix for each Gaussian component, where + $U$ is an upper-triangular matrix. + + In order to use the Cholesky decomposition to get $UU^T$, the precision matrix + $\Lambda$ needs to be permutated such that its rows and columns are reversed, which + can be done by applying a similarity transformation with an exchange matrix $J$, + where the 1 elements reside on the anti-diagonal and all other elements are 0. In + particular, the Cholesky decomposition of the transformed precision matrix is + $J\Lambda J=LL^T$, where $L$ is a lower-triangular matrix. Because $\Lambda=UU^T$ + and $J=J^{-1}=J^T$, the `precisions_cholesky_` for each Gaussian component can be + expressed as $JLJ$. + + Refer to #26415 for details. + + Parameters + ---------- + precisions : array-like + The precision matrix of the current components. + The shape depends on the covariance_type. + + covariance_type : {'full', 'tied', 'diag', 'spherical'} + The type of precision matrices. + + Returns + ------- + precisions_cholesky : array-like + The cholesky decomposition of sample precisions of the current + components. The shape depends on the covariance_type. + """ + if covariance_type == "full": + precisions_cholesky = np.array( + [ + _flipudlr(linalg.cholesky(_flipudlr(precision), lower=True)) + for precision in precisions + ] + ) + elif covariance_type == "tied": + precisions_cholesky = _flipudlr( + linalg.cholesky(_flipudlr(precisions), lower=True) + ) + else: + precisions_cholesky = np.sqrt(precisions) + return precisions_cholesky + + +############################################################################### +# Gaussian mixture probability estimators +def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features): + """Compute the log-det of the cholesky decomposition of matrices. + + Parameters + ---------- + matrix_chol : array-like + Cholesky decompositions of the matrices. 
+ 'full' : shape of (n_components, n_features, n_features) + 'tied' : shape of (n_features, n_features) + 'diag' : shape of (n_components, n_features) + 'spherical' : shape of (n_components,) + + covariance_type : {'full', 'tied', 'diag', 'spherical'} + + n_features : int + Number of features. + + Returns + ------- + log_det_precision_chol : array-like of shape (n_components,) + The determinant of the precision matrix for each component. + """ + if covariance_type == "full": + n_components, _, _ = matrix_chol.shape + log_det_chol = np.sum( + np.log(matrix_chol.reshape(n_components, -1)[:, :: n_features + 1]), 1 + ) + + elif covariance_type == "tied": + log_det_chol = np.sum(np.log(np.diag(matrix_chol))) + + elif covariance_type == "diag": + log_det_chol = np.sum(np.log(matrix_chol), axis=1) + + else: + log_det_chol = n_features * (np.log(matrix_chol)) + + return log_det_chol + + +def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type): + """Estimate the log Gaussian probability. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + means : array-like of shape (n_components, n_features) + + precisions_chol : array-like + Cholesky decompositions of the precision matrices. + 'full' : shape of (n_components, n_features, n_features) + 'tied' : shape of (n_features, n_features) + 'diag' : shape of (n_components, n_features) + 'spherical' : shape of (n_components,) + + covariance_type : {'full', 'tied', 'diag', 'spherical'} + + Returns + ------- + log_prob : array, shape (n_samples, n_components) + """ + n_samples, n_features = X.shape + n_components, _ = means.shape + # The determinant of the precision matrix from the Cholesky decomposition + # corresponds to the negative half of the determinant of the full precision + # matrix. + # In short: det(precision_chol) = - det(precision) / 2 + log_det = _compute_log_det_cholesky(precisions_chol, covariance_type, n_features) + + if covariance_type == "full": + log_prob = np.empty((n_samples, n_components)) + for k, (mu, prec_chol) in enumerate(zip(means, precisions_chol)): + y = np.dot(X, prec_chol) - np.dot(mu, prec_chol) + log_prob[:, k] = np.sum(np.square(y), axis=1) + + elif covariance_type == "tied": + log_prob = np.empty((n_samples, n_components)) + for k, mu in enumerate(means): + y = np.dot(X, precisions_chol) - np.dot(mu, precisions_chol) + log_prob[:, k] = np.sum(np.square(y), axis=1) + + elif covariance_type == "diag": + precisions = precisions_chol**2 + log_prob = ( + np.sum((means**2 * precisions), 1) + - 2.0 * np.dot(X, (means * precisions).T) + + np.dot(X**2, precisions.T) + ) + + elif covariance_type == "spherical": + precisions = precisions_chol**2 + log_prob = ( + np.sum(means**2, 1) * precisions + - 2 * np.dot(X, means.T * precisions) + + np.outer(row_norms(X, squared=True), precisions) + ) + # Since we are using the precision of the Cholesky decomposition, + # `- 0.5 * log_det_precision` becomes `+ log_det_precision_chol` + return -0.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det + + +class GaussianMixture(BaseMixture): + """Gaussian Mixture. + + Representation of a Gaussian mixture model probability distribution. + This class allows to estimate the parameters of a Gaussian mixture + distribution. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + n_components : int, default=1 + The number of mixture components. 
+ + covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full' + String describing the type of covariance parameters to use. + Must be one of: + + - 'full': each component has its own general covariance matrix. + - 'tied': all components share the same general covariance matrix. + - 'diag': each component has its own diagonal covariance matrix. + - 'spherical': each component has its own single variance. + + tol : float, default=1e-3 + The convergence threshold. EM iterations will stop when the + lower bound average gain is below this threshold. + + reg_covar : float, default=1e-6 + Non-negative regularization added to the diagonal of covariance. + Allows to assure that the covariance matrices are all positive. + + max_iter : int, default=100 + The number of EM iterations to perform. + + n_init : int, default=1 + The number of initializations to perform. The best results are kept. + + init_params : {'kmeans', 'k-means++', 'random', 'random_from_data'}, \ + default='kmeans' + The method used to initialize the weights, the means and the + precisions. + String must be one of: + + - 'kmeans' : responsibilities are initialized using kmeans. + - 'k-means++' : use the k-means++ method to initialize. + - 'random' : responsibilities are initialized randomly. + - 'random_from_data' : initial means are randomly selected data points. + + .. versionchanged:: v1.1 + `init_params` now accepts 'random_from_data' and 'k-means++' as + initialization methods. + + weights_init : array-like of shape (n_components, ), default=None + The user-provided initial weights. + If it is None, weights are initialized using the `init_params` method. + + means_init : array-like of shape (n_components, n_features), default=None + The user-provided initial means, + If it is None, means are initialized using the `init_params` method. + + precisions_init : array-like, default=None + The user-provided initial precisions (inverse of the covariance + matrices). + If it is None, precisions are initialized using the 'init_params' + method. + The shape depends on 'covariance_type':: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + random_state : int, RandomState instance or None, default=None + Controls the random seed given to the method chosen to initialize the + parameters (see `init_params`). + In addition, it controls the generation of random samples from the + fitted distribution (see the method `sample`). + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + warm_start : bool, default=False + If 'warm_start' is True, the solution of the last fitting is used as + initialization for the next call of fit(). This can speed up + convergence when fit is called several times on similar problems. + In that case, 'n_init' is ignored and only a single initialization + occurs upon the first call. + See :term:`the Glossary `. + + verbose : int, default=0 + Enable verbose output. If 1 then it prints the current + initialization and each iteration step. If greater than 1 then + it prints also the log probability and the time needed + for each step. + + verbose_interval : int, default=10 + Number of iteration done before the next print. + + Attributes + ---------- + weights_ : array-like of shape (n_components,) + The weights of each mixture components. + + means_ : array-like of shape (n_components, n_features) + The mean of each mixture component. 
+ + covariances_ : array-like + The covariance of each mixture component. + The shape depends on `covariance_type`:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + precisions_ : array-like + The precision matrices for each component in the mixture. A precision + matrix is the inverse of a covariance matrix. A covariance matrix is + symmetric positive definite so the mixture of Gaussian can be + equivalently parameterized by the precision matrices. Storing the + precision matrices instead of the covariance matrices makes it more + efficient to compute the log-likelihood of new samples at test time. + The shape depends on `covariance_type`:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + precisions_cholesky_ : array-like + The cholesky decomposition of the precision matrices of each mixture + component. A precision matrix is the inverse of a covariance matrix. + A covariance matrix is symmetric positive definite so the mixture of + Gaussian can be equivalently parameterized by the precision matrices. + Storing the precision matrices instead of the covariance matrices makes + it more efficient to compute the log-likelihood of new samples at test + time. The shape depends on `covariance_type`:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + converged_ : bool + True when convergence was reached in fit(), False otherwise. + + n_iter_ : int + Number of step used by the best fit of EM to reach the convergence. + + lower_bound_ : float + Lower bound value on the log-likelihood (of the training data with + respect to the model) of the best fit of EM. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + BayesianGaussianMixture : Gaussian mixture model fit with a variational + inference. 
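For `covariance_type='full'`, the attributes described above are mutually consistent: each precision matrix is the inverse of the corresponding covariance matrix and is recovered from its Cholesky factor. A minimal standalone check of that relationship (random data, illustrative values):

    import numpy as np
    from sklearn.mixture import GaussianMixture

    X = np.random.RandomState(0).rand(200, 2)
    gm = GaussianMixture(n_components=2, covariance_type="full", random_state=0).fit(X)

    for cov, prec, chol in zip(gm.covariances_, gm.precisions_, gm.precisions_cholesky_):
        assert np.allclose(prec, np.linalg.inv(cov))  # precision is the inverse covariance
        assert np.allclose(chol @ chol.T, prec)       # the Cholesky factor reproduces the precision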
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.mixture import GaussianMixture + >>> X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]]) + >>> gm = GaussianMixture(n_components=2, random_state=0).fit(X) + >>> gm.means_ + array([[10., 2.], + [ 1., 2.]]) + >>> gm.predict([[0, 0], [12, 3]]) + array([1, 0]) + """ + + _parameter_constraints: dict = { + **BaseMixture._parameter_constraints, + "covariance_type": [StrOptions({"full", "tied", "diag", "spherical"})], + "weights_init": ["array-like", None], + "means_init": ["array-like", None], + "precisions_init": ["array-like", None], + } + + def __init__( + self, + n_components=1, + *, + covariance_type="full", + tol=1e-3, + reg_covar=1e-6, + max_iter=100, + n_init=1, + init_params="kmeans", + weights_init=None, + means_init=None, + precisions_init=None, + random_state=None, + warm_start=False, + verbose=0, + verbose_interval=10, + ): + super().__init__( + n_components=n_components, + tol=tol, + reg_covar=reg_covar, + max_iter=max_iter, + n_init=n_init, + init_params=init_params, + random_state=random_state, + warm_start=warm_start, + verbose=verbose, + verbose_interval=verbose_interval, + ) + + self.covariance_type = covariance_type + self.weights_init = weights_init + self.means_init = means_init + self.precisions_init = precisions_init + + def _check_parameters(self, X): + """Check the Gaussian mixture parameters are well defined.""" + _, n_features = X.shape + + if self.weights_init is not None: + self.weights_init = _check_weights(self.weights_init, self.n_components) + + if self.means_init is not None: + self.means_init = _check_means( + self.means_init, self.n_components, n_features + ) + + if self.precisions_init is not None: + self.precisions_init = _check_precisions( + self.precisions_init, + self.covariance_type, + self.n_components, + n_features, + ) + + def _initialize_parameters(self, X, random_state): + # If all the initial parameters are all provided, then there is no need to run + # the initialization. + compute_resp = ( + self.weights_init is None + or self.means_init is None + or self.precisions_init is None + ) + if compute_resp: + super()._initialize_parameters(X, random_state) + else: + self._initialize(X, None) + + def _initialize(self, X, resp): + """Initialization of the Gaussian mixture parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + resp : array-like of shape (n_samples, n_components) + """ + n_samples, _ = X.shape + weights, means, covariances = None, None, None + if resp is not None: + weights, means, covariances = _estimate_gaussian_parameters( + X, resp, self.reg_covar, self.covariance_type + ) + if self.weights_init is None: + weights /= n_samples + + self.weights_ = weights if self.weights_init is None else self.weights_init + self.means_ = means if self.means_init is None else self.means_init + + if self.precisions_init is None: + self.covariances_ = covariances + self.precisions_cholesky_ = _compute_precision_cholesky( + covariances, self.covariance_type + ) + else: + self.precisions_cholesky_ = _compute_precision_cholesky_from_precisions( + self.precisions_init, self.covariance_type + ) + + def _m_step(self, X, log_resp): + """M step. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + log_resp : array-like of shape (n_samples, n_components) + Logarithm of the posterior probabilities (or responsibilities) of + the point of each sample in X. 
+ """ + self.weights_, self.means_, self.covariances_ = _estimate_gaussian_parameters( + X, np.exp(log_resp), self.reg_covar, self.covariance_type + ) + self.weights_ /= self.weights_.sum() + self.precisions_cholesky_ = _compute_precision_cholesky( + self.covariances_, self.covariance_type + ) + + def _estimate_log_prob(self, X): + return _estimate_log_gaussian_prob( + X, self.means_, self.precisions_cholesky_, self.covariance_type + ) + + def _estimate_log_weights(self): + return np.log(self.weights_) + + def _compute_lower_bound(self, _, log_prob_norm): + return log_prob_norm + + def _get_parameters(self): + return ( + self.weights_, + self.means_, + self.covariances_, + self.precisions_cholesky_, + ) + + def _set_parameters(self, params): + ( + self.weights_, + self.means_, + self.covariances_, + self.precisions_cholesky_, + ) = params + + # Attributes computation + _, n_features = self.means_.shape + + if self.covariance_type == "full": + self.precisions_ = np.empty(self.precisions_cholesky_.shape) + for k, prec_chol in enumerate(self.precisions_cholesky_): + self.precisions_[k] = np.dot(prec_chol, prec_chol.T) + + elif self.covariance_type == "tied": + self.precisions_ = np.dot( + self.precisions_cholesky_, self.precisions_cholesky_.T + ) + else: + self.precisions_ = self.precisions_cholesky_**2 + + def _n_parameters(self): + """Return the number of free parameters in the model.""" + _, n_features = self.means_.shape + if self.covariance_type == "full": + cov_params = self.n_components * n_features * (n_features + 1) / 2.0 + elif self.covariance_type == "diag": + cov_params = self.n_components * n_features + elif self.covariance_type == "tied": + cov_params = n_features * (n_features + 1) / 2.0 + elif self.covariance_type == "spherical": + cov_params = self.n_components + mean_params = n_features * self.n_components + return int(cov_params + mean_params + self.n_components - 1) + + def bic(self, X): + """Bayesian information criterion for the current model on the input X. + + You can refer to this :ref:`mathematical section ` for more + details regarding the formulation of the BIC used. + + Parameters + ---------- + X : array of shape (n_samples, n_dimensions) + The input samples. + + Returns + ------- + bic : float + The lower the better. + """ + return -2 * self.score(X) * X.shape[0] + self._n_parameters() * np.log( + X.shape[0] + ) + + def aic(self, X): + """Akaike information criterion for the current model on the input X. + + You can refer to this :ref:`mathematical section ` for more + details regarding the formulation of the AIC used. + + Parameters + ---------- + X : array of shape (n_samples, n_dimensions) + The input samples. + + Returns + ------- + aic : float + The lower the better. 
+ """ + return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters() diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a462cb8a0ed9a69dcb395f3053f8cbd31c269f1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_bayesian_mixture.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_bayesian_mixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d836cee48cbd8c5bb054f2583a8a3172beafb050 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_bayesian_mixture.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_gaussian_mixture.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_gaussian_mixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..751a3eb799309681cd83ffbeb40c67ea839db05f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_gaussian_mixture.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_mixture.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_mixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..366bb963014a02759a3e527caad784886dc22cc4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_mixture.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/test_bayesian_mixture.py b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/test_bayesian_mixture.py new file mode 100644 index 0000000000000000000000000000000000000000..9c6eb4a86ea0d4e5988706e6e841fe5f5b992871 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/test_bayesian_mixture.py @@ -0,0 +1,466 @@ +# Author: Wei Xue +# Thierry Guillemot +# License: BSD 3 clause +import copy + +import numpy as np +import pytest +from scipy.special import gammaln + +from sklearn.exceptions import ConvergenceWarning, NotFittedError +from sklearn.metrics.cluster import adjusted_rand_score +from sklearn.mixture import BayesianGaussianMixture +from sklearn.mixture._bayesian_mixture import _log_dirichlet_norm, _log_wishart_norm +from sklearn.mixture.tests.test_gaussian_mixture import RandomData +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_equal, + ignore_warnings, +) + +COVARIANCE_TYPE = ["full", "tied", "diag", "spherical"] +PRIOR_TYPE = ["dirichlet_process", "dirichlet_distribution"] + + +def test_log_dirichlet_norm(): + rng = np.random.RandomState(0) + + weight_concentration = rng.rand(2) + expected_norm = 
gammaln(np.sum(weight_concentration)) - np.sum( + gammaln(weight_concentration) + ) + predected_norm = _log_dirichlet_norm(weight_concentration) + + assert_almost_equal(expected_norm, predected_norm) + + +def test_log_wishart_norm(): + rng = np.random.RandomState(0) + + n_components, n_features = 5, 2 + degrees_of_freedom = np.abs(rng.rand(n_components)) + 1.0 + log_det_precisions_chol = n_features * np.log(range(2, 2 + n_components)) + + expected_norm = np.empty(5) + for k, (degrees_of_freedom_k, log_det_k) in enumerate( + zip(degrees_of_freedom, log_det_precisions_chol) + ): + expected_norm[k] = -( + degrees_of_freedom_k * (log_det_k + 0.5 * n_features * np.log(2.0)) + + np.sum( + gammaln( + 0.5 + * (degrees_of_freedom_k - np.arange(0, n_features)[:, np.newaxis]) + ), + 0, + ) + ).item() + predected_norm = _log_wishart_norm( + degrees_of_freedom, log_det_precisions_chol, n_features + ) + + assert_almost_equal(expected_norm, predected_norm) + + +def test_bayesian_mixture_weights_prior_initialisation(): + rng = np.random.RandomState(0) + n_samples, n_components, n_features = 10, 5, 2 + X = rng.rand(n_samples, n_features) + + # Check correct init for a given value of weight_concentration_prior + weight_concentration_prior = rng.rand() + bgmm = BayesianGaussianMixture( + weight_concentration_prior=weight_concentration_prior, random_state=rng + ).fit(X) + assert_almost_equal(weight_concentration_prior, bgmm.weight_concentration_prior_) + + # Check correct init for the default value of weight_concentration_prior + bgmm = BayesianGaussianMixture(n_components=n_components, random_state=rng).fit(X) + assert_almost_equal(1.0 / n_components, bgmm.weight_concentration_prior_) + + +def test_bayesian_mixture_mean_prior_initialisation(): + rng = np.random.RandomState(0) + n_samples, n_components, n_features = 10, 3, 2 + X = rng.rand(n_samples, n_features) + + # Check correct init for a given value of mean_precision_prior + mean_precision_prior = rng.rand() + bgmm = BayesianGaussianMixture( + mean_precision_prior=mean_precision_prior, random_state=rng + ).fit(X) + assert_almost_equal(mean_precision_prior, bgmm.mean_precision_prior_) + + # Check correct init for the default value of mean_precision_prior + bgmm = BayesianGaussianMixture(random_state=rng).fit(X) + assert_almost_equal(1.0, bgmm.mean_precision_prior_) + + # Check correct init for a given value of mean_prior + mean_prior = rng.rand(n_features) + bgmm = BayesianGaussianMixture( + n_components=n_components, mean_prior=mean_prior, random_state=rng + ).fit(X) + assert_almost_equal(mean_prior, bgmm.mean_prior_) + + # Check correct init for the default value of bemean_priorta + bgmm = BayesianGaussianMixture(n_components=n_components, random_state=rng).fit(X) + assert_almost_equal(X.mean(axis=0), bgmm.mean_prior_) + + +def test_bayesian_mixture_precisions_prior_initialisation(): + rng = np.random.RandomState(0) + n_samples, n_features = 10, 2 + X = rng.rand(n_samples, n_features) + + # Check raise message for a bad value of degrees_of_freedom_prior + bad_degrees_of_freedom_prior_ = n_features - 1.0 + bgmm = BayesianGaussianMixture( + degrees_of_freedom_prior=bad_degrees_of_freedom_prior_, random_state=rng + ) + msg = ( + "The parameter 'degrees_of_freedom_prior' should be greater than" + f" {n_features -1}, but got {bad_degrees_of_freedom_prior_:.3f}." 
+ ) + with pytest.raises(ValueError, match=msg): + bgmm.fit(X) + + # Check correct init for a given value of degrees_of_freedom_prior + degrees_of_freedom_prior = rng.rand() + n_features - 1.0 + bgmm = BayesianGaussianMixture( + degrees_of_freedom_prior=degrees_of_freedom_prior, random_state=rng + ).fit(X) + assert_almost_equal(degrees_of_freedom_prior, bgmm.degrees_of_freedom_prior_) + + # Check correct init for the default value of degrees_of_freedom_prior + degrees_of_freedom_prior_default = n_features + bgmm = BayesianGaussianMixture( + degrees_of_freedom_prior=degrees_of_freedom_prior_default, random_state=rng + ).fit(X) + assert_almost_equal( + degrees_of_freedom_prior_default, bgmm.degrees_of_freedom_prior_ + ) + + # Check correct init for a given value of covariance_prior + covariance_prior = { + "full": np.cov(X.T, bias=1) + 10, + "tied": np.cov(X.T, bias=1) + 5, + "diag": np.diag(np.atleast_2d(np.cov(X.T, bias=1))) + 3, + "spherical": rng.rand(), + } + + bgmm = BayesianGaussianMixture(random_state=rng) + for cov_type in ["full", "tied", "diag", "spherical"]: + bgmm.covariance_type = cov_type + bgmm.covariance_prior = covariance_prior[cov_type] + bgmm.fit(X) + assert_almost_equal(covariance_prior[cov_type], bgmm.covariance_prior_) + + # Check correct init for the default value of covariance_prior + covariance_prior_default = { + "full": np.atleast_2d(np.cov(X.T)), + "tied": np.atleast_2d(np.cov(X.T)), + "diag": np.var(X, axis=0, ddof=1), + "spherical": np.var(X, axis=0, ddof=1).mean(), + } + + bgmm = BayesianGaussianMixture(random_state=0) + for cov_type in ["full", "tied", "diag", "spherical"]: + bgmm.covariance_type = cov_type + bgmm.fit(X) + assert_almost_equal(covariance_prior_default[cov_type], bgmm.covariance_prior_) + + +def test_bayesian_mixture_check_is_fitted(): + rng = np.random.RandomState(0) + n_samples, n_features = 10, 2 + + # Check raise message + bgmm = BayesianGaussianMixture(random_state=rng) + X = rng.rand(n_samples, n_features) + + msg = "This BayesianGaussianMixture instance is not fitted yet." 
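`NotFittedError` subclasses `ValueError`, which is why the `pytest.raises(ValueError, ...)` check below matches it; the same guard is what a user hits when calling `score` or `predict` before `fit`. A minimal sketch of that behaviour:

    import numpy as np
    from sklearn.exceptions import NotFittedError
    from sklearn.mixture import BayesianGaussianMixture

    X = np.random.RandomState(0).rand(10, 2)
    try:
        BayesianGaussianMixture().score(X)
    except NotFittedError as exc:
        print(exc)  # This BayesianGaussianMixture instance is not fitted yet. Call 'fit' ...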
+ with pytest.raises(ValueError, match=msg): + bgmm.score(X) + + +def test_bayesian_mixture_weights(): + rng = np.random.RandomState(0) + n_samples, n_features = 10, 2 + + X = rng.rand(n_samples, n_features) + + # Case Dirichlet distribution for the weight concentration prior type + bgmm = BayesianGaussianMixture( + weight_concentration_prior_type="dirichlet_distribution", + n_components=3, + random_state=rng, + ).fit(X) + + expected_weights = bgmm.weight_concentration_ / np.sum(bgmm.weight_concentration_) + assert_almost_equal(expected_weights, bgmm.weights_) + assert_almost_equal(np.sum(bgmm.weights_), 1.0) + + # Case Dirichlet process for the weight concentration prior type + dpgmm = BayesianGaussianMixture( + weight_concentration_prior_type="dirichlet_process", + n_components=3, + random_state=rng, + ).fit(X) + weight_dirichlet_sum = ( + dpgmm.weight_concentration_[0] + dpgmm.weight_concentration_[1] + ) + tmp = dpgmm.weight_concentration_[1] / weight_dirichlet_sum + expected_weights = ( + dpgmm.weight_concentration_[0] + / weight_dirichlet_sum + * np.hstack((1, np.cumprod(tmp[:-1]))) + ) + expected_weights /= np.sum(expected_weights) + assert_almost_equal(expected_weights, dpgmm.weights_) + assert_almost_equal(np.sum(dpgmm.weights_), 1.0) + + +@ignore_warnings(category=ConvergenceWarning) +def test_monotonic_likelihood(): + # We check that each step of the each step of variational inference without + # regularization improve monotonically the training set of the bound + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=20) + n_components = rand_data.n_components + + for prior_type in PRIOR_TYPE: + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + bgmm = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=2 * n_components, + covariance_type=covar_type, + warm_start=True, + max_iter=1, + random_state=rng, + tol=1e-3, + ) + current_lower_bound = -np.inf + # Do one training iteration at a time so we can make sure that the + # training log likelihood increases after each iteration. + for _ in range(600): + prev_lower_bound = current_lower_bound + current_lower_bound = bgmm.fit(X).lower_bound_ + assert current_lower_bound >= prev_lower_bound + + if bgmm.converged_: + break + assert bgmm.converged_ + + +def test_compare_covar_type(): + # We can compare the 'full' precision with the other cov_type if we apply + # 1 iter of the M-step (done during _initialize_parameters). 
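The dirichlet-process branch of `test_bayesian_mixture_weights` above rebuilds the mixture weights by stick breaking from the two beta parameters stored in `weight_concentration_`. The same arithmetic on its own, with illustrative values standing in for a fitted model:

    import numpy as np

    a = np.array([5.0, 3.0, 1.0])  # stands in for weight_concentration_[0]
    b = np.array([2.0, 4.0, 6.0])  # stands in for weight_concentration_[1]

    sticks = a / (a + b)                                       # expected fraction broken off at each step
    leftover = np.hstack((1, np.cumprod((b / (a + b))[:-1])))  # expected fraction left before each break
    weights = sticks * leftover
    weights /= weights.sum()                                   # renormalized, as in _set_parameters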
+ rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + X = rand_data.X["full"] + n_components = rand_data.n_components + + for prior_type in PRIOR_TYPE: + # Computation of the full_covariance + bgmm = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=2 * n_components, + covariance_type="full", + max_iter=1, + random_state=0, + tol=1e-7, + ) + bgmm._check_parameters(X) + bgmm._initialize_parameters(X, np.random.RandomState(0)) + full_covariances = ( + bgmm.covariances_ * bgmm.degrees_of_freedom_[:, np.newaxis, np.newaxis] + ) + + # Check tied_covariance = mean(full_covariances, 0) + bgmm = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=2 * n_components, + covariance_type="tied", + max_iter=1, + random_state=0, + tol=1e-7, + ) + bgmm._check_parameters(X) + bgmm._initialize_parameters(X, np.random.RandomState(0)) + + tied_covariance = bgmm.covariances_ * bgmm.degrees_of_freedom_ + assert_almost_equal(tied_covariance, np.mean(full_covariances, 0)) + + # Check diag_covariance = diag(full_covariances) + bgmm = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=2 * n_components, + covariance_type="diag", + max_iter=1, + random_state=0, + tol=1e-7, + ) + bgmm._check_parameters(X) + bgmm._initialize_parameters(X, np.random.RandomState(0)) + + diag_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_[:, np.newaxis] + assert_almost_equal( + diag_covariances, np.array([np.diag(cov) for cov in full_covariances]) + ) + + # Check spherical_covariance = np.mean(diag_covariances, 0) + bgmm = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=2 * n_components, + covariance_type="spherical", + max_iter=1, + random_state=0, + tol=1e-7, + ) + bgmm._check_parameters(X) + bgmm._initialize_parameters(X, np.random.RandomState(0)) + + spherical_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_ + assert_almost_equal(spherical_covariances, np.mean(diag_covariances, 1)) + + +@ignore_warnings(category=ConvergenceWarning) +def test_check_covariance_precision(): + # We check that the dot product of the covariance and the precision + # matrices is identity. 
+ rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + n_components, n_features = 2 * rand_data.n_components, 2 + + # Computation of the full_covariance + bgmm = BayesianGaussianMixture( + n_components=n_components, max_iter=100, random_state=rng, tol=1e-3, reg_covar=0 + ) + for covar_type in COVARIANCE_TYPE: + bgmm.covariance_type = covar_type + bgmm.fit(rand_data.X[covar_type]) + + if covar_type == "full": + for covar, precision in zip(bgmm.covariances_, bgmm.precisions_): + assert_almost_equal(np.dot(covar, precision), np.eye(n_features)) + elif covar_type == "tied": + assert_almost_equal( + np.dot(bgmm.covariances_, bgmm.precisions_), np.eye(n_features) + ) + + elif covar_type == "diag": + assert_almost_equal( + bgmm.covariances_ * bgmm.precisions_, + np.ones((n_components, n_features)), + ) + + else: + assert_almost_equal( + bgmm.covariances_ * bgmm.precisions_, np.ones(n_components) + ) + + +@ignore_warnings(category=ConvergenceWarning) +def test_invariant_translation(): + # We check here that adding a constant in the data change correctly the + # parameters of the mixture + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=100) + n_components = 2 * rand_data.n_components + + for prior_type in PRIOR_TYPE: + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + bgmm1 = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=n_components, + max_iter=100, + random_state=0, + tol=1e-3, + reg_covar=0, + ).fit(X) + bgmm2 = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=n_components, + max_iter=100, + random_state=0, + tol=1e-3, + reg_covar=0, + ).fit(X + 100) + + assert_almost_equal(bgmm1.means_, bgmm2.means_ - 100) + assert_almost_equal(bgmm1.weights_, bgmm2.weights_) + assert_almost_equal(bgmm1.covariances_, bgmm2.covariances_) + + +@pytest.mark.filterwarnings("ignore:.*did not converge.*") +@pytest.mark.parametrize( + "seed, max_iter, tol", + [ + (0, 2, 1e-7), # strict non-convergence + (1, 2, 1e-1), # loose non-convergence + (3, 300, 1e-7), # strict convergence + (4, 300, 1e-1), # loose convergence + ], +) +def test_bayesian_mixture_fit_predict(seed, max_iter, tol): + rng = np.random.RandomState(seed) + rand_data = RandomData(rng, n_samples=50, scale=7) + n_components = 2 * rand_data.n_components + + for covar_type in COVARIANCE_TYPE: + bgmm1 = BayesianGaussianMixture( + n_components=n_components, + max_iter=max_iter, + random_state=rng, + tol=tol, + reg_covar=0, + ) + bgmm1.covariance_type = covar_type + bgmm2 = copy.deepcopy(bgmm1) + X = rand_data.X[covar_type] + + Y_pred1 = bgmm1.fit(X).predict(X) + Y_pred2 = bgmm2.fit_predict(X) + assert_array_equal(Y_pred1, Y_pred2) + + +def test_bayesian_mixture_fit_predict_n_init(): + # Check that fit_predict is equivalent to fit.predict, when n_init > 1 + X = np.random.RandomState(0).randn(50, 5) + gm = BayesianGaussianMixture(n_components=5, n_init=10, random_state=0) + y_pred1 = gm.fit_predict(X) + y_pred2 = gm.predict(X) + assert_array_equal(y_pred1, y_pred2) + + +def test_bayesian_mixture_predict_predict_proba(): + # this is the same test as test_gaussian_mixture_predict_predict_proba() + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + for prior_type in PRIOR_TYPE: + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + Y = rand_data.Y + bgmm = BayesianGaussianMixture( + n_components=rand_data.n_components, + random_state=rng, + weight_concentration_prior_type=prior_type, + 
covariance_type=covar_type, + ) + + # Check a warning message arrive if we don't do fit + msg = ( + "This BayesianGaussianMixture instance is not fitted yet. " + "Call 'fit' with appropriate arguments before using this " + "estimator." + ) + with pytest.raises(NotFittedError, match=msg): + bgmm.predict(X) + + bgmm.fit(X) + Y_pred = bgmm.predict(X) + Y_pred_proba = bgmm.predict_proba(X).argmax(axis=1) + assert_array_equal(Y_pred, Y_pred_proba) + assert adjusted_rand_score(Y, Y_pred) >= 0.95 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/test_gaussian_mixture.py b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/test_gaussian_mixture.py new file mode 100644 index 0000000000000000000000000000000000000000..e24a6af96637458b39e63430beecf53983e0ecf0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/test_gaussian_mixture.py @@ -0,0 +1,1422 @@ +# Author: Wei Xue +# Thierry Guillemot +# License: BSD 3 clause + +import copy +import itertools +import re +import sys +import warnings +from io import StringIO +from unittest.mock import Mock + +import numpy as np +import pytest +from scipy import linalg, stats + +import sklearn +from sklearn.cluster import KMeans +from sklearn.covariance import EmpiricalCovariance +from sklearn.datasets import make_spd_matrix +from sklearn.exceptions import ConvergenceWarning, NotFittedError +from sklearn.metrics.cluster import adjusted_rand_score +from sklearn.mixture import GaussianMixture +from sklearn.mixture._gaussian_mixture import ( + _compute_log_det_cholesky, + _compute_precision_cholesky, + _estimate_gaussian_covariances_diag, + _estimate_gaussian_covariances_full, + _estimate_gaussian_covariances_spherical, + _estimate_gaussian_covariances_tied, + _estimate_gaussian_parameters, +) +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.extmath import fast_logdet + +COVARIANCE_TYPE = ["full", "tied", "diag", "spherical"] + + +def generate_data(n_samples, n_features, weights, means, precisions, covariance_type): + rng = np.random.RandomState(0) + + X = [] + if covariance_type == "spherical": + for _, (w, m, c) in enumerate(zip(weights, means, precisions["spherical"])): + X.append( + rng.multivariate_normal( + m, c * np.eye(n_features), int(np.round(w * n_samples)) + ) + ) + if covariance_type == "diag": + for _, (w, m, c) in enumerate(zip(weights, means, precisions["diag"])): + X.append( + rng.multivariate_normal(m, np.diag(c), int(np.round(w * n_samples))) + ) + if covariance_type == "tied": + for _, (w, m) in enumerate(zip(weights, means)): + X.append( + rng.multivariate_normal( + m, precisions["tied"], int(np.round(w * n_samples)) + ) + ) + if covariance_type == "full": + for _, (w, m, c) in enumerate(zip(weights, means, precisions["full"])): + X.append(rng.multivariate_normal(m, c, int(np.round(w * n_samples)))) + + X = np.vstack(X) + return X + + +class RandomData: + def __init__(self, rng, n_samples=200, n_components=2, n_features=2, scale=50): + self.n_samples = n_samples + self.n_components = n_components + self.n_features = n_features + + self.weights = rng.rand(n_components) + self.weights = self.weights / self.weights.sum() + self.means = rng.rand(n_components, n_features) * scale + self.covariances = { + "spherical": 0.5 + rng.rand(n_components), + "diag": (0.5 + rng.rand(n_components, n_features)) ** 2, + "tied": make_spd_matrix(n_features, random_state=rng), + 
"full": np.array( + [ + make_spd_matrix(n_features, random_state=rng) * 0.5 + for _ in range(n_components) + ] + ), + } + self.precisions = { + "spherical": 1.0 / self.covariances["spherical"], + "diag": 1.0 / self.covariances["diag"], + "tied": linalg.inv(self.covariances["tied"]), + "full": np.array( + [linalg.inv(covariance) for covariance in self.covariances["full"]] + ), + } + + self.X = dict( + zip( + COVARIANCE_TYPE, + [ + generate_data( + n_samples, + n_features, + self.weights, + self.means, + self.covariances, + covar_type, + ) + for covar_type in COVARIANCE_TYPE + ], + ) + ) + self.Y = np.hstack( + [ + np.full(int(np.round(w * n_samples)), k, dtype=int) + for k, w in enumerate(self.weights) + ] + ) + + +def test_gaussian_mixture_attributes(): + # test bad parameters + rng = np.random.RandomState(0) + X = rng.rand(10, 2) + + # test good parameters + n_components, tol, n_init, max_iter, reg_covar = 2, 1e-4, 3, 30, 1e-1 + covariance_type, init_params = "full", "random" + gmm = GaussianMixture( + n_components=n_components, + tol=tol, + n_init=n_init, + max_iter=max_iter, + reg_covar=reg_covar, + covariance_type=covariance_type, + init_params=init_params, + ).fit(X) + + assert gmm.n_components == n_components + assert gmm.covariance_type == covariance_type + assert gmm.tol == tol + assert gmm.reg_covar == reg_covar + assert gmm.max_iter == max_iter + assert gmm.n_init == n_init + assert gmm.init_params == init_params + + +def test_check_weights(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + + n_components = rand_data.n_components + X = rand_data.X["full"] + + g = GaussianMixture(n_components=n_components) + + # Check bad shape + weights_bad_shape = rng.rand(n_components, 1) + g.weights_init = weights_bad_shape + msg = re.escape( + "The parameter 'weights' should have the shape of " + f"({n_components},), but got {str(weights_bad_shape.shape)}" + ) + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check bad range + weights_bad_range = rng.rand(n_components) + 1 + g.weights_init = weights_bad_range + msg = re.escape( + "The parameter 'weights' should be in the range [0, 1], but got" + f" max value {np.min(weights_bad_range):.5f}, " + f"min value {np.max(weights_bad_range):.5f}" + ) + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check bad normalization + weights_bad_norm = rng.rand(n_components) + weights_bad_norm = weights_bad_norm / (weights_bad_norm.sum() + 1) + g.weights_init = weights_bad_norm + msg = re.escape( + "The parameter 'weights' should be normalized, " + f"but got sum(weights) = {np.sum(weights_bad_norm):.5f}" + ) + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check good weights matrix + weights = rand_data.weights + g = GaussianMixture(weights_init=weights, n_components=n_components) + g.fit(X) + assert_array_equal(weights, g.weights_init) + + +def test_check_means(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + + n_components, n_features = rand_data.n_components, rand_data.n_features + X = rand_data.X["full"] + + g = GaussianMixture(n_components=n_components) + + # Check means bad shape + means_bad_shape = rng.rand(n_components + 1, n_features) + g.means_init = means_bad_shape + msg = "The parameter 'means' should have the shape of " + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check good means matrix + means = rand_data.means + g.means_init = means + g.fit(X) + assert_array_equal(means, g.means_init) + + +def test_check_precisions(): + rng = np.random.RandomState(0) + 
rand_data = RandomData(rng) + + n_components, n_features = rand_data.n_components, rand_data.n_features + + # Define the bad precisions for each covariance_type + precisions_bad_shape = { + "full": np.ones((n_components + 1, n_features, n_features)), + "tied": np.ones((n_features + 1, n_features + 1)), + "diag": np.ones((n_components + 1, n_features)), + "spherical": np.ones((n_components + 1)), + } + + # Define not positive-definite precisions + precisions_not_pos = np.ones((n_components, n_features, n_features)) + precisions_not_pos[0] = np.eye(n_features) + precisions_not_pos[0, 0, 0] = -1.0 + + precisions_not_positive = { + "full": precisions_not_pos, + "tied": precisions_not_pos[0], + "diag": np.full((n_components, n_features), -1.0), + "spherical": np.full(n_components, -1.0), + } + + not_positive_errors = { + "full": "symmetric, positive-definite", + "tied": "symmetric, positive-definite", + "diag": "positive", + "spherical": "positive", + } + + for covar_type in COVARIANCE_TYPE: + X = RandomData(rng).X[covar_type] + g = GaussianMixture( + n_components=n_components, covariance_type=covar_type, random_state=rng + ) + + # Check precisions with bad shapes + g.precisions_init = precisions_bad_shape[covar_type] + msg = f"The parameter '{covar_type} precision' should have the shape of" + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check not positive precisions + g.precisions_init = precisions_not_positive[covar_type] + msg = f"'{covar_type} precision' should be {not_positive_errors[covar_type]}" + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check the correct init of precisions_init + g.precisions_init = rand_data.precisions[covar_type] + g.fit(X) + assert_array_equal(rand_data.precisions[covar_type], g.precisions_init) + + +def test_suffstat_sk_full(): + # compare the precision matrix compute from the + # EmpiricalCovariance.covariance fitted on X*sqrt(resp) + # with _sufficient_sk_full, n_components=1 + rng = np.random.RandomState(0) + n_samples, n_features = 500, 2 + + # special case 1, assuming data is "centered" + X = rng.rand(n_samples, n_features) + resp = rng.rand(n_samples, 1) + X_resp = np.sqrt(resp) * X + nk = np.array([n_samples]) + xk = np.zeros((1, n_features)) + covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) + ecov = EmpiricalCovariance(assume_centered=True) + ecov.fit(X_resp) + assert_almost_equal(ecov.error_norm(covars_pred[0], norm="frobenius"), 0) + assert_almost_equal(ecov.error_norm(covars_pred[0], norm="spectral"), 0) + + # check the precision computation + precs_chol_pred = _compute_precision_cholesky(covars_pred, "full") + precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred]) + precs_est = np.array([linalg.inv(cov) for cov in covars_pred]) + assert_array_almost_equal(precs_est, precs_pred) + + # special case 2, assuming resp are all ones + resp = np.ones((n_samples, 1)) + nk = np.array([n_samples]) + xk = X.mean(axis=0).reshape((1, -1)) + covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) + ecov = EmpiricalCovariance(assume_centered=False) + ecov.fit(X) + assert_almost_equal(ecov.error_norm(covars_pred[0], norm="frobenius"), 0) + assert_almost_equal(ecov.error_norm(covars_pred[0], norm="spectral"), 0) + + # check the precision computation + precs_chol_pred = _compute_precision_cholesky(covars_pred, "full") + precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred]) + precs_est = np.array([linalg.inv(cov) for cov in covars_pred]) + 
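With a single component and all-ones responsibilities, the estimate returned by `_estimate_gaussian_parameters` reduces to the ordinary biased sample covariance, which is essentially what the surrounding test verifies through `EmpiricalCovariance`. A compact numeric version of that special case:

    import numpy as np
    from sklearn.mixture._gaussian_mixture import _estimate_gaussian_parameters

    rng = np.random.RandomState(0)
    X = rng.rand(6, 2)
    resp = np.ones((6, 1))  # one component owning every sample
    nk, means, covs = _estimate_gaussian_parameters(X, resp, reg_covar=0.0, covariance_type="full")
    assert np.allclose(covs[0], np.cov(X.T, bias=1), atol=1e-12)  # matches the biased sample covariance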
assert_array_almost_equal(precs_est, precs_pred) + + +def test_suffstat_sk_tied(): + # use equation Nk * Sk / N = S_tied + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 500, 2, 2 + + resp = rng.rand(n_samples, n_components) + resp = resp / resp.sum(axis=1)[:, np.newaxis] + X = rng.rand(n_samples, n_features) + nk = resp.sum(axis=0) + xk = np.dot(resp.T, X) / nk[:, np.newaxis] + + covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) + covars_pred_full = ( + np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full, 0) / n_samples + ) + + covars_pred_tied = _estimate_gaussian_covariances_tied(resp, X, nk, xk, 0) + + ecov = EmpiricalCovariance() + ecov.covariance_ = covars_pred_full + assert_almost_equal(ecov.error_norm(covars_pred_tied, norm="frobenius"), 0) + assert_almost_equal(ecov.error_norm(covars_pred_tied, norm="spectral"), 0) + + # check the precision computation + precs_chol_pred = _compute_precision_cholesky(covars_pred_tied, "tied") + precs_pred = np.dot(precs_chol_pred, precs_chol_pred.T) + precs_est = linalg.inv(covars_pred_tied) + assert_array_almost_equal(precs_est, precs_pred) + + +def test_suffstat_sk_diag(): + # test against 'full' case + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 500, 2, 2 + + resp = rng.rand(n_samples, n_components) + resp = resp / resp.sum(axis=1)[:, np.newaxis] + X = rng.rand(n_samples, n_features) + nk = resp.sum(axis=0) + xk = np.dot(resp.T, X) / nk[:, np.newaxis] + covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) + covars_pred_diag = _estimate_gaussian_covariances_diag(resp, X, nk, xk, 0) + + ecov = EmpiricalCovariance() + for cov_full, cov_diag in zip(covars_pred_full, covars_pred_diag): + ecov.covariance_ = np.diag(np.diag(cov_full)) + cov_diag = np.diag(cov_diag) + assert_almost_equal(ecov.error_norm(cov_diag, norm="frobenius"), 0) + assert_almost_equal(ecov.error_norm(cov_diag, norm="spectral"), 0) + + # check the precision computation + precs_chol_pred = _compute_precision_cholesky(covars_pred_diag, "diag") + assert_almost_equal(covars_pred_diag, 1.0 / precs_chol_pred**2) + + +def test_gaussian_suffstat_sk_spherical(): + # computing spherical covariance equals to the variance of one-dimension + # data after flattening, n_components=1 + rng = np.random.RandomState(0) + n_samples, n_features = 500, 2 + + X = rng.rand(n_samples, n_features) + X = X - X.mean() + resp = np.ones((n_samples, 1)) + nk = np.array([n_samples]) + xk = X.mean() + covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X, nk, xk, 0) + covars_pred_spherical2 = np.dot(X.flatten().T, X.flatten()) / ( + n_features * n_samples + ) + assert_almost_equal(covars_pred_spherical, covars_pred_spherical2) + + # check the precision computation + precs_chol_pred = _compute_precision_cholesky(covars_pred_spherical, "spherical") + assert_almost_equal(covars_pred_spherical, 1.0 / precs_chol_pred**2) + + +def test_compute_log_det_cholesky(): + n_features = 2 + rand_data = RandomData(np.random.RandomState(0)) + + for covar_type in COVARIANCE_TYPE: + covariance = rand_data.covariances[covar_type] + + if covar_type == "full": + predected_det = np.array([linalg.det(cov) for cov in covariance]) + elif covar_type == "tied": + predected_det = linalg.det(covariance) + elif covar_type == "diag": + predected_det = np.array([np.prod(cov) for cov in covariance]) + elif covar_type == "spherical": + predected_det = covariance**n_features + + # We compute the cholesky decomposition of the 
covariance matrix + expected_det = _compute_log_det_cholesky( + _compute_precision_cholesky(covariance, covar_type), + covar_type, + n_features=n_features, + ) + assert_array_almost_equal(expected_det, -0.5 * np.log(predected_det)) + + +def _naive_lmvnpdf_diag(X, means, covars): + resp = np.empty((len(X), len(means))) + stds = np.sqrt(covars) + for i, (mean, std) in enumerate(zip(means, stds)): + resp[:, i] = stats.norm.logpdf(X, mean, std).sum(axis=1) + return resp + + +def test_gaussian_mixture_log_probabilities(): + from sklearn.mixture._gaussian_mixture import _estimate_log_gaussian_prob + + # test against with _naive_lmvnpdf_diag + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + n_samples = 500 + n_features = rand_data.n_features + n_components = rand_data.n_components + + means = rand_data.means + covars_diag = rng.rand(n_components, n_features) + X = rng.rand(n_samples, n_features) + log_prob_naive = _naive_lmvnpdf_diag(X, means, covars_diag) + + # full covariances + precs_full = np.array([np.diag(1.0 / np.sqrt(x)) for x in covars_diag]) + + log_prob = _estimate_log_gaussian_prob(X, means, precs_full, "full") + assert_array_almost_equal(log_prob, log_prob_naive) + + # diag covariances + precs_chol_diag = 1.0 / np.sqrt(covars_diag) + log_prob = _estimate_log_gaussian_prob(X, means, precs_chol_diag, "diag") + assert_array_almost_equal(log_prob, log_prob_naive) + + # tied + covars_tied = np.array([x for x in covars_diag]).mean(axis=0) + precs_tied = np.diag(np.sqrt(1.0 / covars_tied)) + + log_prob_naive = _naive_lmvnpdf_diag(X, means, [covars_tied] * n_components) + log_prob = _estimate_log_gaussian_prob(X, means, precs_tied, "tied") + + assert_array_almost_equal(log_prob, log_prob_naive) + + # spherical + covars_spherical = covars_diag.mean(axis=1) + precs_spherical = 1.0 / np.sqrt(covars_diag.mean(axis=1)) + log_prob_naive = _naive_lmvnpdf_diag( + X, means, [[k] * n_features for k in covars_spherical] + ) + log_prob = _estimate_log_gaussian_prob(X, means, precs_spherical, "spherical") + assert_array_almost_equal(log_prob, log_prob_naive) + + +# skip tests on weighted_log_probabilities, log_weights + + +def test_gaussian_mixture_estimate_log_prob_resp(): + # test whether responsibilities are normalized + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=5) + n_samples = rand_data.n_samples + n_features = rand_data.n_features + n_components = rand_data.n_components + + X = rng.rand(n_samples, n_features) + for covar_type in COVARIANCE_TYPE: + weights = rand_data.weights + means = rand_data.means + precisions = rand_data.precisions[covar_type] + g = GaussianMixture( + n_components=n_components, + random_state=rng, + weights_init=weights, + means_init=means, + precisions_init=precisions, + covariance_type=covar_type, + ) + g.fit(X) + resp = g.predict_proba(X) + assert_array_almost_equal(resp.sum(axis=1), np.ones(n_samples)) + assert_array_equal(g.weights_init, weights) + assert_array_equal(g.means_init, means) + assert_array_equal(g.precisions_init, precisions) + + +def test_gaussian_mixture_predict_predict_proba(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + Y = rand_data.Y + g = GaussianMixture( + n_components=rand_data.n_components, + random_state=rng, + weights_init=rand_data.weights, + means_init=rand_data.means, + precisions_init=rand_data.precisions[covar_type], + covariance_type=covar_type, + ) + + # Check a warning message arrive if we don't do fit + msg = ( + 
"This GaussianMixture instance is not fitted yet. Call 'fit' " + "with appropriate arguments before using this estimator." + ) + with pytest.raises(NotFittedError, match=msg): + g.predict(X) + + g.fit(X) + Y_pred = g.predict(X) + Y_pred_proba = g.predict_proba(X).argmax(axis=1) + assert_array_equal(Y_pred, Y_pred_proba) + assert adjusted_rand_score(Y, Y_pred) > 0.95 + + +@pytest.mark.filterwarnings("ignore:.*did not converge.*") +@pytest.mark.parametrize( + "seed, max_iter, tol", + [ + (0, 2, 1e-7), # strict non-convergence + (1, 2, 1e-1), # loose non-convergence + (3, 300, 1e-7), # strict convergence + (4, 300, 1e-1), # loose convergence + ], +) +def test_gaussian_mixture_fit_predict(seed, max_iter, tol): + rng = np.random.RandomState(seed) + rand_data = RandomData(rng) + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + Y = rand_data.Y + g = GaussianMixture( + n_components=rand_data.n_components, + random_state=rng, + weights_init=rand_data.weights, + means_init=rand_data.means, + precisions_init=rand_data.precisions[covar_type], + covariance_type=covar_type, + max_iter=max_iter, + tol=tol, + ) + + # check if fit_predict(X) is equivalent to fit(X).predict(X) + f = copy.deepcopy(g) + Y_pred1 = f.fit(X).predict(X) + Y_pred2 = g.fit_predict(X) + assert_array_equal(Y_pred1, Y_pred2) + assert adjusted_rand_score(Y, Y_pred2) > 0.95 + + +def test_gaussian_mixture_fit_predict_n_init(): + # Check that fit_predict is equivalent to fit.predict, when n_init > 1 + X = np.random.RandomState(0).randn(1000, 5) + gm = GaussianMixture(n_components=5, n_init=5, random_state=0) + y_pred1 = gm.fit_predict(X) + y_pred2 = gm.predict(X) + assert_array_equal(y_pred1, y_pred2) + + +def test_gaussian_mixture_fit(): + # recover the ground truth + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + n_features = rand_data.n_features + n_components = rand_data.n_components + + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + g = GaussianMixture( + n_components=n_components, + n_init=20, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + g.fit(X) + + # needs more data to pass the test with rtol=1e-7 + assert_allclose( + np.sort(g.weights_), np.sort(rand_data.weights), rtol=0.1, atol=1e-2 + ) + + arg_idx1 = g.means_[:, 0].argsort() + arg_idx2 = rand_data.means[:, 0].argsort() + assert_allclose( + g.means_[arg_idx1], rand_data.means[arg_idx2], rtol=0.1, atol=1e-2 + ) + + if covar_type == "full": + prec_pred = g.precisions_ + prec_test = rand_data.precisions["full"] + elif covar_type == "tied": + prec_pred = np.array([g.precisions_] * n_components) + prec_test = np.array([rand_data.precisions["tied"]] * n_components) + elif covar_type == "spherical": + prec_pred = np.array([np.eye(n_features) * c for c in g.precisions_]) + prec_test = np.array( + [np.eye(n_features) * c for c in rand_data.precisions["spherical"]] + ) + elif covar_type == "diag": + prec_pred = np.array([np.diag(d) for d in g.precisions_]) + prec_test = np.array([np.diag(d) for d in rand_data.precisions["diag"]]) + + arg_idx1 = np.trace(prec_pred, axis1=1, axis2=2).argsort() + arg_idx2 = np.trace(prec_test, axis1=1, axis2=2).argsort() + for k, h in zip(arg_idx1, arg_idx2): + ecov = EmpiricalCovariance() + ecov.covariance_ = prec_test[h] + # the accuracy depends on the number of data and randomness, rng + assert_allclose(ecov.error_norm(prec_pred[k]), 0, atol=0.15) + + +def test_gaussian_mixture_fit_best_params(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + n_components = 
rand_data.n_components + n_init = 10 + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + g = GaussianMixture( + n_components=n_components, + n_init=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + ll = [] + for _ in range(n_init): + g.fit(X) + ll.append(g.score(X)) + ll = np.array(ll) + g_best = GaussianMixture( + n_components=n_components, + n_init=n_init, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + g_best.fit(X) + assert_almost_equal(ll.min(), g_best.score(X)) + + +def test_gaussian_mixture_fit_convergence_warning(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=1) + n_components = rand_data.n_components + max_iter = 1 + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + g = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=max_iter, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + msg = ( + f"Initialization {max_iter} did not converge. Try different init " + "parameters, or increase max_iter, tol or check for degenerate" + " data." + ) + with pytest.warns(ConvergenceWarning, match=msg): + g.fit(X) + + +def test_multiple_init(): + # Test that multiple inits does not much worse than a single one + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 50, 5, 2 + X = rng.randn(n_samples, n_features) + for cv_type in COVARIANCE_TYPE: + train1 = ( + GaussianMixture( + n_components=n_components, covariance_type=cv_type, random_state=0 + ) + .fit(X) + .score(X) + ) + train2 = ( + GaussianMixture( + n_components=n_components, + covariance_type=cv_type, + random_state=0, + n_init=5, + ) + .fit(X) + .score(X) + ) + assert train2 >= train1 + + +def test_gaussian_mixture_n_parameters(): + # Test that the right number of parameters is estimated + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 50, 5, 2 + X = rng.randn(n_samples, n_features) + n_params = {"spherical": 13, "diag": 21, "tied": 26, "full": 41} + for cv_type in COVARIANCE_TYPE: + g = GaussianMixture( + n_components=n_components, covariance_type=cv_type, random_state=rng + ).fit(X) + assert g._n_parameters() == n_params[cv_type] + + +def test_bic_1d_1component(): + # Test all of the covariance_types return the same BIC score for + # 1-dimensional, 1 component fits. 
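# Editorial note (not part of the original diff): a hedged sketch of why identical BIC
# values can be expected here. With n_dim=1 and n_components=1 every covariance_type
# collapses to a single scalar variance, so both the log-likelihood and the number of
# free parameters (one mean + one variance = 2, the single weight being fixed at 1)
# agree across types. The sketch assumes BIC follows the usual definition
# -2*log L + p*log(n); names below are illustrative and independent of the fixtures.
import numpy as np
from sklearn.mixture import GaussianMixture

X_demo = np.random.RandomState(0).randn(100, 1)
gm_demo = GaussianMixture(n_components=1, covariance_type="full", random_state=0).fit(X_demo)
log_likelihood = gm_demo.score(X_demo) * X_demo.shape[0]   # score() is the mean per-sample value
manual_bic = -2 * log_likelihood + 2 * np.log(X_demo.shape[0])
assert np.isclose(manual_bic, gm_demo.bic(X_demo))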
+ rng = np.random.RandomState(0) + n_samples, n_dim, n_components = 100, 1, 1 + X = rng.randn(n_samples, n_dim) + bic_full = ( + GaussianMixture( + n_components=n_components, covariance_type="full", random_state=rng + ) + .fit(X) + .bic(X) + ) + for covariance_type in ["tied", "diag", "spherical"]: + bic = ( + GaussianMixture( + n_components=n_components, + covariance_type=covariance_type, + random_state=rng, + ) + .fit(X) + .bic(X) + ) + assert_almost_equal(bic_full, bic) + + +def test_gaussian_mixture_aic_bic(): + # Test the aic and bic criteria + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 50, 3, 2 + X = rng.randn(n_samples, n_features) + # standard gaussian entropy + sgh = 0.5 * ( + fast_logdet(np.cov(X.T, bias=1)) + n_features * (1 + np.log(2 * np.pi)) + ) + for cv_type in COVARIANCE_TYPE: + g = GaussianMixture( + n_components=n_components, + covariance_type=cv_type, + random_state=rng, + max_iter=200, + ) + g.fit(X) + aic = 2 * n_samples * sgh + 2 * g._n_parameters() + bic = 2 * n_samples * sgh + np.log(n_samples) * g._n_parameters() + bound = n_features / np.sqrt(n_samples) + assert (g.aic(X) - aic) / n_samples < bound + assert (g.bic(X) - bic) / n_samples < bound + + +def test_gaussian_mixture_verbose(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + n_components = rand_data.n_components + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + g = GaussianMixture( + n_components=n_components, + n_init=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + verbose=1, + ) + h = GaussianMixture( + n_components=n_components, + n_init=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + verbose=2, + ) + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + g.fit(X) + h.fit(X) + finally: + sys.stdout = old_stdout + + +@pytest.mark.filterwarnings("ignore:.*did not converge.*") +@pytest.mark.parametrize("seed", (0, 1, 2)) +def test_warm_start(seed): + random_state = seed + rng = np.random.RandomState(random_state) + n_samples, n_features, n_components = 500, 2, 2 + X = rng.rand(n_samples, n_features) + + # Assert the warm_start give the same result for the same number of iter + g = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=2, + reg_covar=0, + random_state=random_state, + warm_start=False, + ) + h = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=1, + reg_covar=0, + random_state=random_state, + warm_start=True, + ) + + g.fit(X) + score1 = h.fit(X).score(X) + score2 = h.fit(X).score(X) + + assert_almost_equal(g.weights_, h.weights_) + assert_almost_equal(g.means_, h.means_) + assert_almost_equal(g.precisions_, h.precisions_) + assert score2 > score1 + + # Assert that by using warm_start we can converge to a good solution + g = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=5, + reg_covar=0, + random_state=random_state, + warm_start=False, + tol=1e-6, + ) + h = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=5, + reg_covar=0, + random_state=random_state, + warm_start=True, + tol=1e-6, + ) + + g.fit(X) + assert not g.converged_ + + h.fit(X) + # depending on the data there is large variability in the number of + # refit necessary to converge due to the complete randomness of the + # data + for _ in range(1000): + h.fit(X) + if h.converged_: + break + assert h.converged_ + + +@ignore_warnings(category=ConvergenceWarning) +def test_convergence_detected_with_warm_start(): + # We check that convergence is detected 
when warm_start=True + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + n_components = rand_data.n_components + X = rand_data.X["full"] + + for max_iter in (1, 2, 50): + gmm = GaussianMixture( + n_components=n_components, + warm_start=True, + max_iter=max_iter, + random_state=rng, + ) + for _ in range(100): + gmm.fit(X) + if gmm.converged_: + break + assert gmm.converged_ + assert max_iter >= gmm.n_iter_ + + +def test_score(): + covar_type = "full" + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + n_components = rand_data.n_components + X = rand_data.X[covar_type] + + # Check the error message if we don't call fit + gmm1 = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + msg = ( + "This GaussianMixture instance is not fitted yet. Call 'fit' with " + "appropriate arguments before using this estimator." + ) + with pytest.raises(NotFittedError, match=msg): + gmm1.score(X) + + # Check score value + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + gmm1.fit(X) + gmm_score = gmm1.score(X) + gmm_score_proba = gmm1.score_samples(X).mean() + assert_almost_equal(gmm_score, gmm_score_proba) + + # Check if the score increase + gmm2 = GaussianMixture( + n_components=n_components, + n_init=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ).fit(X) + assert gmm2.score(X) > gmm1.score(X) + + +def test_score_samples(): + covar_type = "full" + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + n_components = rand_data.n_components + X = rand_data.X[covar_type] + + # Check the error message if we don't call fit + gmm = GaussianMixture( + n_components=n_components, + n_init=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + msg = ( + "This GaussianMixture instance is not fitted yet. Call 'fit' with " + "appropriate arguments before using this estimator." + ) + with pytest.raises(NotFittedError, match=msg): + gmm.score_samples(X) + + gmm_score_samples = gmm.fit(X).score_samples(X) + assert gmm_score_samples.shape[0] == rand_data.n_samples + + +def test_monotonic_likelihood(): + # We check that each step of the EM without regularization improve + # monotonically the training set likelihood + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + n_components = rand_data.n_components + + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + gmm = GaussianMixture( + n_components=n_components, + covariance_type=covar_type, + reg_covar=0, + warm_start=True, + max_iter=1, + random_state=rng, + tol=1e-7, + ) + current_log_likelihood = -np.inf + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + # Do one training iteration at a time so we can make sure that the + # training log likelihood increases after each iteration. + for _ in range(600): + prev_log_likelihood = current_log_likelihood + current_log_likelihood = gmm.fit(X).score(X) + assert current_log_likelihood >= prev_log_likelihood + + if gmm.converged_: + break + + assert gmm.converged_ + + +def test_regularisation(): + # We train the GaussianMixture on degenerate data by defining two clusters + # of a 0 covariance. 
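# Editorial note (not part of the original diff): a standalone illustration of the
# failure mode provoked below. A component whose samples have collapsed to a single
# point has a zero empirical covariance, for which no Cholesky factor exists; adding a
# small positive value on the diagonal (the role reg_covar plays) restores
# positive-definiteness. Plain numpy/scipy, illustrative names only.
import numpy as np
from scipy import linalg as demo_linalg

collapsed_cov = np.zeros((2, 2))                      # covariance of a collapsed component
try:
    demo_linalg.cholesky(collapsed_cov, lower=True)
except np.linalg.LinAlgError:
    pass                                              # expected: not positive-definite
regularized_cov = collapsed_cov + 1e-6 * np.eye(2)    # analogous to reg_covar on the diagonal
demo_linalg.cholesky(regularized_cov, lower=True)     # now succeeds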
+ rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + + X = np.vstack( + (np.ones((n_samples // 2, n_features)), np.zeros((n_samples // 2, n_features))) + ) + + for covar_type in COVARIANCE_TYPE: + gmm = GaussianMixture( + n_components=n_samples, + reg_covar=0, + covariance_type=covar_type, + random_state=rng, + ) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", RuntimeWarning) + msg = re.escape( + "Fitting the mixture model failed because some components have" + " ill-defined empirical covariance (for instance caused by " + "singleton or collapsed samples). Try to decrease the number " + "of components, or increase reg_covar." + ) + with pytest.raises(ValueError, match=msg): + gmm.fit(X) + + gmm.set_params(reg_covar=1e-6).fit(X) + + +def test_property(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + n_components = rand_data.n_components + + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + gmm = GaussianMixture( + n_components=n_components, + covariance_type=covar_type, + random_state=rng, + n_init=5, + ) + gmm.fit(X) + if covar_type == "full": + for prec, covar in zip(gmm.precisions_, gmm.covariances_): + assert_array_almost_equal(linalg.inv(prec), covar) + elif covar_type == "tied": + assert_array_almost_equal(linalg.inv(gmm.precisions_), gmm.covariances_) + else: + assert_array_almost_equal(gmm.precisions_, 1.0 / gmm.covariances_) + + +def test_sample(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7, n_components=3) + n_features, n_components = rand_data.n_features, rand_data.n_components + + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + + gmm = GaussianMixture( + n_components=n_components, covariance_type=covar_type, random_state=rng + ) + # To sample we need that GaussianMixture is fitted + msg = "This GaussianMixture instance is not fitted" + with pytest.raises(NotFittedError, match=msg): + gmm.sample(0) + gmm.fit(X) + + msg = "Invalid value for 'n_samples'" + with pytest.raises(ValueError, match=msg): + gmm.sample(0) + + # Just to make sure the class samples correctly + n_samples = 20000 + X_s, y_s = gmm.sample(n_samples) + + for k in range(n_components): + if covar_type == "full": + assert_array_almost_equal( + gmm.covariances_[k], np.cov(X_s[y_s == k].T), decimal=1 + ) + elif covar_type == "tied": + assert_array_almost_equal( + gmm.covariances_, np.cov(X_s[y_s == k].T), decimal=1 + ) + elif covar_type == "diag": + assert_array_almost_equal( + gmm.covariances_[k], np.diag(np.cov(X_s[y_s == k].T)), decimal=1 + ) + else: + assert_array_almost_equal( + gmm.covariances_[k], + np.var(X_s[y_s == k] - gmm.means_[k]), + decimal=1, + ) + + means_s = np.array([np.mean(X_s[y_s == k], 0) for k in range(n_components)]) + assert_array_almost_equal(gmm.means_, means_s, decimal=1) + + # Check shapes of sampled data, see + # https://github.com/scikit-learn/scikit-learn/issues/7701 + assert X_s.shape == (n_samples, n_features) + + for sample_size in range(1, 100): + X_s, _ = gmm.sample(sample_size) + assert X_s.shape == (sample_size, n_features) + + +@ignore_warnings(category=ConvergenceWarning) +def test_init(): + # We check that by increasing the n_init number we have a better solution + for random_state in range(15): + rand_data = RandomData( + np.random.RandomState(random_state), n_samples=50, scale=1 + ) + n_components = rand_data.n_components + X = rand_data.X["full"] + + gmm1 = GaussianMixture( + n_components=n_components, n_init=1, max_iter=1, 
random_state=random_state + ).fit(X) + gmm2 = GaussianMixture( + n_components=n_components, n_init=10, max_iter=1, random_state=random_state + ).fit(X) + + assert gmm2.lower_bound_ >= gmm1.lower_bound_ + + +def test_gaussian_mixture_setting_best_params(): + """`GaussianMixture`'s best_parameters, `n_iter_` and `lower_bound_` + must be set appropriately in the case of divergence. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/18216 + """ + rnd = np.random.RandomState(0) + n_samples = 30 + X = rnd.uniform(size=(n_samples, 3)) + + # following initialization parameters were found to lead to divergence + means_init = np.array( + [ + [0.670637869618158, 0.21038256107384043, 0.12892629765485303], + [0.09394051075844147, 0.5759464955561779, 0.929296197576212], + [0.5033230372781258, 0.9569852381759425, 0.08654043447295741], + [0.18578301420435747, 0.5531158970919143, 0.19388943970532435], + [0.4548589928173794, 0.35182513658825276, 0.568146063202464], + [0.609279894978321, 0.7929063819678847, 0.9620097270828052], + ] + ) + precisions_init = np.array( + [ + 999999.999604483, + 999999.9990869573, + 553.7603944542167, + 204.78596008931834, + 15.867423501783637, + 85.4595728389735, + ] + ) + weights_init = [ + 0.03333333333333341, + 0.03333333333333341, + 0.06666666666666674, + 0.06666666666666674, + 0.7000000000000001, + 0.10000000000000007, + ] + + gmm = GaussianMixture( + covariance_type="spherical", + reg_covar=0, + means_init=means_init, + weights_init=weights_init, + random_state=rnd, + n_components=len(weights_init), + precisions_init=precisions_init, + max_iter=1, + ) + # ensure that no error is thrown during fit + gmm.fit(X) + + # check that the fit did not converge + assert not gmm.converged_ + + # check that parameters are set for gmm + for attr in [ + "weights_", + "means_", + "covariances_", + "precisions_cholesky_", + "n_iter_", + "lower_bound_", + ]: + assert hasattr(gmm, attr) + + +@pytest.mark.parametrize( + "init_params", ["random", "random_from_data", "k-means++", "kmeans"] +) +def test_init_means_not_duplicated(init_params, global_random_seed): + # Check that all initialisations provide not duplicated starting means + rng = np.random.RandomState(global_random_seed) + rand_data = RandomData(rng, scale=5) + n_components = rand_data.n_components + X = rand_data.X["full"] + + gmm = GaussianMixture( + n_components=n_components, init_params=init_params, random_state=rng, max_iter=0 + ) + gmm.fit(X) + + means = gmm.means_ + for i_mean, j_mean in itertools.combinations(means, r=2): + assert not np.allclose(i_mean, j_mean) + + +@pytest.mark.parametrize( + "init_params", ["random", "random_from_data", "k-means++", "kmeans"] +) +def test_means_for_all_inits(init_params, global_random_seed): + # Check fitted means properties for all initializations + rng = np.random.RandomState(global_random_seed) + rand_data = RandomData(rng, scale=5) + n_components = rand_data.n_components + X = rand_data.X["full"] + + gmm = GaussianMixture( + n_components=n_components, init_params=init_params, random_state=rng + ) + gmm.fit(X) + + assert gmm.means_.shape == (n_components, X.shape[1]) + assert np.all(X.min(axis=0) <= gmm.means_) + assert np.all(gmm.means_ <= X.max(axis=0)) + assert gmm.converged_ + + +def test_max_iter_zero(): + # Check that max_iter=0 returns initialisation as expected + # Pick arbitrary initial means and check equal to max_iter=0 + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=5) + n_components = rand_data.n_components + X = 
rand_data.X["full"] + means_init = [[20, 30], [30, 25]] + gmm = GaussianMixture( + n_components=n_components, + random_state=rng, + means_init=means_init, + tol=1e-06, + max_iter=0, + ) + gmm.fit(X) + + assert_allclose(gmm.means_, means_init) + + +def test_gaussian_mixture_precisions_init_diag(): + """Check that we properly initialize `precision_cholesky_` when we manually + provide the precision matrix. + + In this regard, we check the consistency between estimating the precision + matrix and providing the same precision matrix as initialization. It should + lead to the same results with the same number of iterations. + + If the initialization is wrong then the number of iterations will increase. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/16944 + """ + # generate a toy dataset + n_samples = 300 + rng = np.random.RandomState(0) + shifted_gaussian = rng.randn(n_samples, 2) + np.array([20, 20]) + C = np.array([[0.0, -0.7], [3.5, 0.7]]) + stretched_gaussian = np.dot(rng.randn(n_samples, 2), C) + X = np.vstack([shifted_gaussian, stretched_gaussian]) + + # common parameters to check the consistency of precision initialization + n_components, covariance_type, reg_covar, random_state = 2, "diag", 1e-6, 0 + + # execute the manual initialization to compute the precision matrix: + # - run KMeans to have an initial guess + # - estimate the covariance + # - compute the precision matrix from the estimated covariance + resp = np.zeros((X.shape[0], n_components)) + label = ( + KMeans(n_clusters=n_components, n_init=1, random_state=random_state) + .fit(X) + .labels_ + ) + resp[np.arange(X.shape[0]), label] = 1 + _, _, covariance = _estimate_gaussian_parameters( + X, resp, reg_covar=reg_covar, covariance_type=covariance_type + ) + precisions_init = 1 / covariance + + gm_with_init = GaussianMixture( + n_components=n_components, + covariance_type=covariance_type, + reg_covar=reg_covar, + precisions_init=precisions_init, + random_state=random_state, + ).fit(X) + + gm_without_init = GaussianMixture( + n_components=n_components, + covariance_type=covariance_type, + reg_covar=reg_covar, + random_state=random_state, + ).fit(X) + + assert gm_without_init.n_iter_ == gm_with_init.n_iter_ + assert_allclose( + gm_with_init.precisions_cholesky_, gm_without_init.precisions_cholesky_ + ) + + +def _generate_data(seed, n_samples, n_features, n_components): + """Randomly generate samples and responsibilities.""" + rs = np.random.RandomState(seed) + X = rs.random_sample((n_samples, n_features)) + resp = rs.random_sample((n_samples, n_components)) + resp /= resp.sum(axis=1)[:, np.newaxis] + return X, resp + + +def _calculate_precisions(X, resp, covariance_type): + """Calculate precision matrix of X and its Cholesky decomposition + for the given covariance type. + """ + reg_covar = 1e-6 + weights, means, covariances = _estimate_gaussian_parameters( + X, resp, reg_covar, covariance_type + ) + precisions_cholesky = _compute_precision_cholesky(covariances, covariance_type) + + _, n_components = resp.shape + # Instantiate a `GaussianMixture` model in order to use its + # `_set_parameters` method to return the `precisions_` and + # `precisions_cholesky_` from matching the `covariance_type` + # provided. 
+ gmm = GaussianMixture(n_components=n_components, covariance_type=covariance_type) + params = (weights, means, covariances, precisions_cholesky) + gmm._set_parameters(params) + return gmm.precisions_, gmm.precisions_cholesky_ + + +@pytest.mark.parametrize("covariance_type", COVARIANCE_TYPE) +def test_gaussian_mixture_precisions_init(covariance_type, global_random_seed): + """Non-regression test for #26415.""" + + X, resp = _generate_data( + seed=global_random_seed, + n_samples=100, + n_features=3, + n_components=4, + ) + + precisions_init, desired_precisions_cholesky = _calculate_precisions( + X, resp, covariance_type + ) + gmm = GaussianMixture( + covariance_type=covariance_type, precisions_init=precisions_init + ) + gmm._initialize(X, resp) + actual_precisions_cholesky = gmm.precisions_cholesky_ + assert_allclose(actual_precisions_cholesky, desired_precisions_cholesky) + + +def test_gaussian_mixture_single_component_stable(): + """ + Non-regression test for #23032 ensuring 1-component GM works on only a + few samples. + """ + rng = np.random.RandomState(0) + X = rng.multivariate_normal(np.zeros(2), np.identity(2), size=3) + gm = GaussianMixture(n_components=1) + gm.fit(X).sample() + + +def test_gaussian_mixture_all_init_does_not_estimate_gaussian_parameters( + monkeypatch, + global_random_seed, +): + """When all init parameters are provided, the Gaussian parameters + are not estimated. + + Non-regression test for gh-26015. + """ + + mock = Mock(side_effect=_estimate_gaussian_parameters) + monkeypatch.setattr( + sklearn.mixture._gaussian_mixture, "_estimate_gaussian_parameters", mock + ) + + rng = np.random.RandomState(global_random_seed) + rand_data = RandomData(rng) + + gm = GaussianMixture( + n_components=rand_data.n_components, + weights_init=rand_data.weights, + means_init=rand_data.means, + precisions_init=rand_data.precisions["full"], + random_state=rng, + ) + gm.fit(rand_data.X["full"]) + # The initial gaussian parameters are not estimated. They are estimated for every + # m_step. + assert mock.call_count == gm.n_iter_ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/test_mixture.py b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/test_mixture.py new file mode 100644 index 0000000000000000000000000000000000000000..f0ea3494f0e7d086968d3f9ff7eac0ecdcf51a96 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/mixture/tests/test_mixture.py @@ -0,0 +1,30 @@ +# Author: Guillaume Lemaitre +# License: BSD 3 clause + +import numpy as np +import pytest + +from sklearn.mixture import BayesianGaussianMixture, GaussianMixture + + +@pytest.mark.parametrize("estimator", [GaussianMixture(), BayesianGaussianMixture()]) +def test_gaussian_mixture_n_iter(estimator): + # check that n_iter is the number of iteration performed. 
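# Editorial note (not part of the original diff): a self-contained sketch of the
# call-counting pattern used in
# test_gaussian_mixture_all_init_does_not_estimate_gaussian_parameters above --
# wrapping a real function in Mock(side_effect=...) preserves its behaviour while
# recording how many times it is invoked. Illustrative names only.
from unittest.mock import Mock

def _real_helper(x):
    return x * 2

counting_helper = Mock(side_effect=_real_helper)
assert counting_helper(3) == 6                 # the side_effect's return value is passed through
assert counting_helper.call_count == 1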
+ rng = np.random.RandomState(0) + X = rng.rand(10, 5) + max_iter = 1 + estimator.set_params(max_iter=max_iter) + estimator.fit(X) + assert estimator.n_iter_ == max_iter + + +@pytest.mark.parametrize("estimator", [GaussianMixture(), BayesianGaussianMixture()]) +def test_mixture_n_components_greater_than_n_samples_error(estimator): + """Check error when n_components <= n_samples""" + rng = np.random.RandomState(0) + X = rng.rand(10, 5) + estimator.set_params(n_components=12) + + msg = "Expected n_samples >= n_components" + with pytest.raises(ValueError, match=msg): + estimator.fit(X) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c730a71260808e76df302bda674232f06b53499e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/__init__.py @@ -0,0 +1,63 @@ +""" +The :mod:`sklearn.preprocessing` module includes scaling, centering, +normalization, binarization methods. +""" + +from ._data import ( + Binarizer, + KernelCenterer, + MaxAbsScaler, + MinMaxScaler, + Normalizer, + PowerTransformer, + QuantileTransformer, + RobustScaler, + StandardScaler, + add_dummy_feature, + binarize, + maxabs_scale, + minmax_scale, + normalize, + power_transform, + quantile_transform, + robust_scale, + scale, +) +from ._discretization import KBinsDiscretizer +from ._encoders import OneHotEncoder, OrdinalEncoder +from ._function_transformer import FunctionTransformer +from ._label import LabelBinarizer, LabelEncoder, MultiLabelBinarizer, label_binarize +from ._polynomial import PolynomialFeatures, SplineTransformer +from ._target_encoder import TargetEncoder + +__all__ = [ + "Binarizer", + "FunctionTransformer", + "KBinsDiscretizer", + "KernelCenterer", + "LabelBinarizer", + "LabelEncoder", + "MultiLabelBinarizer", + "MinMaxScaler", + "MaxAbsScaler", + "QuantileTransformer", + "Normalizer", + "OneHotEncoder", + "OrdinalEncoder", + "PowerTransformer", + "RobustScaler", + "SplineTransformer", + "StandardScaler", + "TargetEncoder", + "add_dummy_feature", + "PolynomialFeatures", + "binarize", + "normalize", + "scale", + "robust_scale", + "maxabs_scale", + "minmax_scale", + "label_binarize", + "quantile_transform", + "power_transform", +] diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_csr_polynomial_expansion.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_csr_polynomial_expansion.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e780835f806e98022563be901427d7f75c3bdb00 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_csr_polynomial_expansion.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_data.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_data.py new file mode 100644 index 0000000000000000000000000000000000000000..8ec8a840298f074f2556d375aba87e374906ddc9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_data.py @@ -0,0 +1,3618 @@ +# Authors: Alexandre Gramfort +# Mathieu Blondel +# Olivier Grisel +# Andreas Mueller +# Eric Martin +# Giorgio Patrini +# Eric Chang +# License: BSD 3 clause + + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import optimize, sparse, stats +from 
scipy.special import boxcox + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + OneToOneFeatureMixin, + TransformerMixin, + _fit_context, +) +from ..utils import _array_api, check_array +from ..utils._array_api import get_namespace +from ..utils._param_validation import Interval, Options, StrOptions, validate_params +from ..utils.extmath import _incremental_mean_and_var, row_norms +from ..utils.sparsefuncs import ( + incr_mean_variance_axis, + inplace_column_scale, + mean_variance_axis, + min_max_axis, +) +from ..utils.sparsefuncs_fast import ( + inplace_csr_row_normalize_l1, + inplace_csr_row_normalize_l2, +) +from ..utils.validation import ( + FLOAT_DTYPES, + _check_sample_weight, + check_is_fitted, + check_random_state, +) +from ._encoders import OneHotEncoder + +BOUNDS_THRESHOLD = 1e-7 + +__all__ = [ + "Binarizer", + "KernelCenterer", + "MinMaxScaler", + "MaxAbsScaler", + "Normalizer", + "OneHotEncoder", + "RobustScaler", + "StandardScaler", + "QuantileTransformer", + "PowerTransformer", + "add_dummy_feature", + "binarize", + "normalize", + "scale", + "robust_scale", + "maxabs_scale", + "minmax_scale", + "quantile_transform", + "power_transform", +] + + +def _is_constant_feature(var, mean, n_samples): + """Detect if a feature is indistinguishable from a constant feature. + + The detection is based on its computed variance and on the theoretical + error bounds of the '2 pass algorithm' for variance computation. + + See "Algorithms for computing the sample variance: analysis and + recommendations", by Chan, Golub, and LeVeque. + """ + # In scikit-learn, variance is always computed using float64 accumulators. + eps = np.finfo(np.float64).eps + + upper_bound = n_samples * eps * var + (n_samples * mean * eps) ** 2 + return var <= upper_bound + + +def _handle_zeros_in_scale(scale, copy=True, constant_mask=None): + """Set scales of near constant features to 1. + + The goal is to avoid division by very small or zero values. + + Near constant features are detected automatically by identifying + scales close to machine precision unless they are precomputed by + the caller and passed with the `constant_mask` kwarg. + + Typically for standard scaling, the scales are the standard + deviation while near constant features are better detected on the + computed variances which are closer to machine precision by + construction. + """ + # if we are fitting on 1D arrays, scale might be a scalar + if np.isscalar(scale): + if scale == 0.0: + scale = 1.0 + return scale + # scale is an array + else: + xp, _ = get_namespace(scale) + if constant_mask is None: + # Detect near constant values to avoid dividing by a very small + # value that could lead to surprising results and numerical + # stability issues. + constant_mask = scale < 10 * xp.finfo(scale.dtype).eps + + if copy: + # New array to avoid side-effects + scale = xp.asarray(scale, copy=True) + scale[constant_mask] = 1.0 + return scale + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "axis": [Options(Integral, {0, 1})], + "with_mean": ["boolean"], + "with_std": ["boolean"], + "copy": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def scale(X, *, axis=0, with_mean=True, with_std=True, copy=True): + """Standardize a dataset along any axis. + + Center to the mean and component wise scale to unit variance. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to center and scale. 
+ + axis : {0, 1}, default=0 + Axis used to compute the means and standard deviations along. If 0, + independently standardize each feature, otherwise (if 1) standardize + each sample. + + with_mean : bool, default=True + If True, center the data before scaling. + + with_std : bool, default=True + If True, scale the data to unit variance (or equivalently, + unit standard deviation). + + copy : bool, default=True + If False, try to avoid a copy and scale in place. + This is not guaranteed to always work in place; e.g. if the data is + a numpy array with an int dtype, a copy will be returned even with + copy=False. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + The transformed data. + + See Also + -------- + StandardScaler : Performs scaling to unit variance using the Transformer + API (e.g. as part of a preprocessing + :class:`~sklearn.pipeline.Pipeline`). + + Notes + ----- + This implementation will refuse to center scipy.sparse matrices + since it would make them non-sparse and would potentially crash the + program with memory exhaustion problems. + + Instead the caller is expected to either set explicitly + `with_mean=False` (in that case, only variance scaling will be + performed on the features of the CSC matrix) or to call `X.toarray()` + if he/she expects the materialized dense array to fit in memory. + + To avoid memory copy the caller should pass a CSC matrix. + + NaNs are treated as missing values: disregarded to compute the statistics, + and maintained during the data transformation. + + We use a biased estimator for the standard deviation, equivalent to + `numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to + affect model performance. + + For a comparison of the different scalers, transformers, and normalizers, + see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. + + .. warning:: Risk of data leak + + Do not use :func:`~sklearn.preprocessing.scale` unless you know + what you are doing. A common mistake is to apply it to the entire data + *before* splitting into training and test sets. This will bias the + model evaluation because information would have leaked from the test + set to the training set. + In general, we recommend using + :class:`~sklearn.preprocessing.StandardScaler` within a + :ref:`Pipeline ` in order to prevent most risks of data + leaking: `pipe = make_pipeline(StandardScaler(), LogisticRegression())`. + + Examples + -------- + >>> from sklearn.preprocessing import scale + >>> X = [[-2, 1, 2], [-1, 0, 1]] + >>> scale(X, axis=0) # scaling each column independently + array([[-1., 1., 1.], + [ 1., -1., -1.]]) + >>> scale(X, axis=1) # scaling each row independently + array([[-1.37..., 0.39..., 0.98...], + [-1.22..., 0. , 1.22...]]) + """ + X = check_array( + X, + accept_sparse="csc", + copy=copy, + ensure_2d=False, + estimator="the scale function", + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + ) + if sparse.issparse(X): + if with_mean: + raise ValueError( + "Cannot center sparse matrices: pass `with_mean=False` instead" + " See docstring for motivation and alternatives." 
+ ) + if axis != 0: + raise ValueError( + "Can only scale sparse matrix on axis=0, got axis=%d" % axis + ) + if with_std: + _, var = mean_variance_axis(X, axis=0) + var = _handle_zeros_in_scale(var, copy=False) + inplace_column_scale(X, 1 / np.sqrt(var)) + else: + X = np.asarray(X) + if with_mean: + mean_ = np.nanmean(X, axis) + if with_std: + scale_ = np.nanstd(X, axis) + # Xr is a view on the original array that enables easy use of + # broadcasting on the axis in which we are interested in + Xr = np.rollaxis(X, axis) + if with_mean: + Xr -= mean_ + mean_1 = np.nanmean(Xr, axis=0) + # Verify that mean_1 is 'close to zero'. If X contains very + # large values, mean_1 can also be very large, due to a lack of + # precision of mean_. In this case, a pre-scaling of the + # concerned feature is efficient, for instance by its mean or + # maximum. + if not np.allclose(mean_1, 0): + warnings.warn( + "Numerical issues were encountered " + "when centering the data " + "and might not be solved. Dataset may " + "contain too large values. You may need " + "to prescale your features." + ) + Xr -= mean_1 + if with_std: + scale_ = _handle_zeros_in_scale(scale_, copy=False) + Xr /= scale_ + if with_mean: + mean_2 = np.nanmean(Xr, axis=0) + # If mean_2 is not 'close to zero', it comes from the fact that + # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even + # if mean_1 was close to zero. The problem is thus essentially + # due to the lack of precision of mean_. A solution is then to + # subtract the mean again: + if not np.allclose(mean_2, 0): + warnings.warn( + "Numerical issues were encountered " + "when scaling the data " + "and might not be solved. The standard " + "deviation of the data is probably " + "very close to 0. " + ) + Xr -= mean_2 + return X + + +class MinMaxScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): + """Transform features by scaling each feature to a given range. + + This estimator scales and translates each feature individually such + that it is in the given range on the training set, e.g. between + zero and one. + + The transformation is given by:: + + X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) + X_scaled = X_std * (max - min) + min + + where min, max = feature_range. + + This transformation is often used as an alternative to zero mean, + unit variance scaling. + + `MinMaxScaler` doesn't reduce the effect of outliers, but it linearly + scales them down into a fixed range, where the largest occurring data point + corresponds to the maximum value and the smallest one corresponds to the + minimum value. For an example visualization, refer to :ref:`Compare + MinMaxScaler with other scalers `. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + feature_range : tuple (min, max), default=(0, 1) + Desired range of transformed data. + + copy : bool, default=True + Set to False to perform inplace row normalization and avoid a + copy (if the input is already a numpy array). + + clip : bool, default=False + Set to True to clip transformed values of held-out data to + provided `feature range`. + + .. versionadded:: 0.24 + + Attributes + ---------- + min_ : ndarray of shape (n_features,) + Per feature adjustment for minimum. Equivalent to + ``min - X.min(axis=0) * self.scale_`` + + scale_ : ndarray of shape (n_features,) + Per feature relative scaling of the data. Equivalent to + ``(max - min) / (X.max(axis=0) - X.min(axis=0))`` + + .. versionadded:: 0.17 + *scale_* attribute. 
+ + data_min_ : ndarray of shape (n_features,) + Per feature minimum seen in the data + + .. versionadded:: 0.17 + *data_min_* + + data_max_ : ndarray of shape (n_features,) + Per feature maximum seen in the data + + .. versionadded:: 0.17 + *data_max_* + + data_range_ : ndarray of shape (n_features,) + Per feature range ``(data_max_ - data_min_)`` seen in the data + + .. versionadded:: 0.17 + *data_range_* + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + n_samples_seen_ : int + The number of samples processed by the estimator. + It will be reset on new calls to fit, but increments across + ``partial_fit`` calls. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + minmax_scale : Equivalent function without the estimator API. + + Notes + ----- + NaNs are treated as missing values: disregarded in fit, and maintained in + transform. + + Examples + -------- + >>> from sklearn.preprocessing import MinMaxScaler + >>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]] + >>> scaler = MinMaxScaler() + >>> print(scaler.fit(data)) + MinMaxScaler() + >>> print(scaler.data_max_) + [ 1. 18.] + >>> print(scaler.transform(data)) + [[0. 0. ] + [0.25 0.25] + [0.5 0.5 ] + [1. 1. ]] + >>> print(scaler.transform([[2, 2]])) + [[1.5 0. ]] + """ + + _parameter_constraints: dict = { + "feature_range": [tuple], + "copy": ["boolean"], + "clip": ["boolean"], + } + + def __init__(self, feature_range=(0, 1), *, copy=True, clip=False): + self.feature_range = feature_range + self.copy = copy + self.clip = clip + + def _reset(self): + """Reset internal data-dependent state of the scaler, if necessary. + + __init__ parameters are not touched. + """ + # Checking one attribute is enough, because they are all set together + # in partial_fit + if hasattr(self, "scale_"): + del self.scale_ + del self.min_ + del self.n_samples_seen_ + del self.data_min_ + del self.data_max_ + del self.data_range_ + + def fit(self, X, y=None): + """Compute the minimum and maximum to be used for later scaling. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data used to compute the per-feature minimum and maximum + used for later scaling along the features axis. + + y : None + Ignored. + + Returns + ------- + self : object + Fitted scaler. + """ + # Reset internal state before fitting + self._reset() + return self.partial_fit(X, y) + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None): + """Online computation of min and max on X for later scaling. + + All of X is processed as a single batch. This is intended for cases + when :meth:`fit` is not feasible due to very large number of + `n_samples` or because X is read from a continuous stream. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data used to compute the mean and standard deviation + used for later scaling along the features axis. + + y : None + Ignored. + + Returns + ------- + self : object + Fitted scaler. + """ + feature_range = self.feature_range + if feature_range[0] >= feature_range[1]: + raise ValueError( + "Minimum of desired feature range must be smaller than maximum. Got %s." + % str(feature_range) + ) + + if sparse.issparse(X): + raise TypeError( + "MinMaxScaler does not support sparse input. " + "Consider using MaxAbsScaler instead." 
+ ) + + xp, _ = get_namespace(X) + + first_pass = not hasattr(self, "n_samples_seen_") + X = self._validate_data( + X, + reset=first_pass, + dtype=_array_api.supported_float_dtypes(xp), + force_all_finite="allow-nan", + ) + + data_min = _array_api._nanmin(X, axis=0) + data_max = _array_api._nanmax(X, axis=0) + + if first_pass: + self.n_samples_seen_ = X.shape[0] + else: + data_min = xp.minimum(self.data_min_, data_min) + data_max = xp.maximum(self.data_max_, data_max) + self.n_samples_seen_ += X.shape[0] + + data_range = data_max - data_min + self.scale_ = (feature_range[1] - feature_range[0]) / _handle_zeros_in_scale( + data_range, copy=True + ) + self.min_ = feature_range[0] - data_min * self.scale_ + self.data_min_ = data_min + self.data_max_ = data_max + self.data_range_ = data_range + return self + + def transform(self, X): + """Scale features of X according to feature_range. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data that will be transformed. + + Returns + ------- + Xt : ndarray of shape (n_samples, n_features) + Transformed data. + """ + check_is_fitted(self) + + xp, _ = get_namespace(X) + + X = self._validate_data( + X, + copy=self.copy, + dtype=_array_api.supported_float_dtypes(xp), + force_all_finite="allow-nan", + reset=False, + ) + + X *= self.scale_ + X += self.min_ + if self.clip: + xp.clip(X, self.feature_range[0], self.feature_range[1], out=X) + return X + + def inverse_transform(self, X): + """Undo the scaling of X according to feature_range. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data that will be transformed. It cannot be sparse. + + Returns + ------- + Xt : ndarray of shape (n_samples, n_features) + Transformed data. + """ + check_is_fitted(self) + + xp, _ = get_namespace(X) + + X = check_array( + X, + copy=self.copy, + dtype=_array_api.supported_float_dtypes(xp), + force_all_finite="allow-nan", + ) + + X -= self.min_ + X /= self.scale_ + return X + + def _more_tags(self): + return {"allow_nan": True} + + +@validate_params( + { + "X": ["array-like"], + "axis": [Options(Integral, {0, 1})], + }, + prefer_skip_nested_validation=False, +) +def minmax_scale(X, feature_range=(0, 1), *, axis=0, copy=True): + """Transform features by scaling each feature to a given range. + + This estimator scales and translates each feature individually such + that it is in the given range on the training set, i.e. between + zero and one. + + The transformation is given by (when ``axis=0``):: + + X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) + X_scaled = X_std * (max - min) + min + + where min, max = feature_range. + + The transformation is calculated as (when ``axis=0``):: + + X_scaled = scale * X + min - X.min(axis=0) * scale + where scale = (max - min) / (X.max(axis=0) - X.min(axis=0)) + + This transformation is often used as an alternative to zero mean, + unit variance scaling. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.17 + *minmax_scale* function interface + to :class:`~sklearn.preprocessing.MinMaxScaler`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data. + + feature_range : tuple (min, max), default=(0, 1) + Desired range of transformed data. + + axis : {0, 1}, default=0 + Axis used to scale along. If 0, independently scale each feature, + otherwise (if 1) scale each sample. + + copy : bool, default=True + If False, try to avoid a copy and scale in place. + This is not guaranteed to always work in place; e.g. 
if the data is + a numpy array with an int dtype, a copy will be returned even with + copy=False. + + Returns + ------- + X_tr : ndarray of shape (n_samples, n_features) + The transformed data. + + .. warning:: Risk of data leak + + Do not use :func:`~sklearn.preprocessing.minmax_scale` unless you know + what you are doing. A common mistake is to apply it to the entire data + *before* splitting into training and test sets. This will bias the + model evaluation because information would have leaked from the test + set to the training set. + In general, we recommend using + :class:`~sklearn.preprocessing.MinMaxScaler` within a + :ref:`Pipeline ` in order to prevent most risks of data + leaking: `pipe = make_pipeline(MinMaxScaler(), LogisticRegression())`. + + See Also + -------- + MinMaxScaler : Performs scaling to a given range using the Transformer + API (e.g. as part of a preprocessing + :class:`~sklearn.pipeline.Pipeline`). + + Notes + ----- + For a comparison of the different scalers, transformers, and normalizers, + see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. + + Examples + -------- + >>> from sklearn.preprocessing import minmax_scale + >>> X = [[-2, 1, 2], [-1, 0, 1]] + >>> minmax_scale(X, axis=0) # scale each column independently + array([[0., 1., 1.], + [1., 0., 0.]]) + >>> minmax_scale(X, axis=1) # scale each row independently + array([[0. , 0.75, 1. ], + [0. , 0.5 , 1. ]]) + """ + # Unlike the scaler object, this function allows 1d input. + # If copy is required, it will be done inside the scaler object. + X = check_array( + X, copy=False, ensure_2d=False, dtype=FLOAT_DTYPES, force_all_finite="allow-nan" + ) + original_ndim = X.ndim + + if original_ndim == 1: + X = X.reshape(X.shape[0], 1) + + s = MinMaxScaler(feature_range=feature_range, copy=copy) + if axis == 0: + X = s.fit_transform(X) + else: + X = s.fit_transform(X.T).T + + if original_ndim == 1: + X = X.ravel() + + return X + + +class StandardScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): + """Standardize features by removing the mean and scaling to unit variance. + + The standard score of a sample `x` is calculated as: + + z = (x - u) / s + + where `u` is the mean of the training samples or zero if `with_mean=False`, + and `s` is the standard deviation of the training samples or one if + `with_std=False`. + + Centering and scaling happen independently on each feature by computing + the relevant statistics on the samples in the training set. Mean and + standard deviation are then stored to be used on later data using + :meth:`transform`. + + Standardization of a dataset is a common requirement for many + machine learning estimators: they might behave badly if the + individual features do not more or less look like standard normally + distributed data (e.g. Gaussian with 0 mean and unit variance). + + For instance many elements used in the objective function of + a learning algorithm (such as the RBF kernel of Support Vector + Machines or the L1 and L2 regularizers of linear models) assume that + all features are centered around 0 and have variance in the same + order. If a feature has a variance that is orders of magnitude larger + than others, it might dominate the objective function and make the + estimator unable to learn from other features correctly as expected. + + `StandardScaler` is sensitive to outliers, and the features may scale + differently from each other in the presence of outliers. 
For an example + visualization, refer to :ref:`Compare StandardScaler with other scalers + `. + + This scaler can also be applied to sparse CSR or CSC matrices by passing + `with_mean=False` to avoid breaking the sparsity structure of the data. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + copy : bool, default=True + If False, try to avoid a copy and do inplace scaling instead. + This is not guaranteed to always work inplace; e.g. if the data is + not a NumPy array or scipy.sparse CSR matrix, a copy may still be + returned. + + with_mean : bool, default=True + If True, center the data before scaling. + This does not work (and will raise an exception) when attempted on + sparse matrices, because centering them entails building a dense + matrix which in common use cases is likely to be too large to fit in + memory. + + with_std : bool, default=True + If True, scale the data to unit variance (or equivalently, + unit standard deviation). + + Attributes + ---------- + scale_ : ndarray of shape (n_features,) or None + Per feature relative scaling of the data to achieve zero mean and unit + variance. Generally this is calculated using `np.sqrt(var_)`. If a + variance is zero, we can't achieve unit variance, and the data is left + as-is, giving a scaling factor of 1. `scale_` is equal to `None` + when `with_std=False`. + + .. versionadded:: 0.17 + *scale_* + + mean_ : ndarray of shape (n_features,) or None + The mean value for each feature in the training set. + Equal to ``None`` when ``with_mean=False`` and ``with_std=False``. + + var_ : ndarray of shape (n_features,) or None + The variance for each feature in the training set. Used to compute + `scale_`. Equal to ``None`` when ``with_mean=False`` and + ``with_std=False``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_seen_ : int or ndarray of shape (n_features,) + The number of samples processed by the estimator for each feature. + If there are no missing samples, the ``n_samples_seen`` will be an + integer, otherwise it will be an array of dtype int. If + `sample_weights` are used it will be a float (if no missing data) + or an array of dtype float that sums the weights seen so far. + Will be reset on new calls to fit, but increments across + ``partial_fit`` calls. + + See Also + -------- + scale : Equivalent function without the estimator API. + + :class:`~sklearn.decomposition.PCA` : Further removes the linear + correlation across features with 'whiten=True'. + + Notes + ----- + NaNs are treated as missing values: disregarded in fit, and maintained in + transform. + + We use a biased estimator for the standard deviation, equivalent to + `numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to + affect model performance. + + Examples + -------- + >>> from sklearn.preprocessing import StandardScaler + >>> data = [[0, 0], [0, 0], [1, 1], [1, 1]] + >>> scaler = StandardScaler() + >>> print(scaler.fit(data)) + StandardScaler() + >>> print(scaler.mean_) + [0.5 0.5] + >>> print(scaler.transform(data)) + [[-1. -1.] + [-1. -1.] + [ 1. 1.] + [ 1. 1.]] + >>> print(scaler.transform([[2, 2]])) + [[3. 
3.]] + """ + + _parameter_constraints: dict = { + "copy": ["boolean"], + "with_mean": ["boolean"], + "with_std": ["boolean"], + } + + def __init__(self, *, copy=True, with_mean=True, with_std=True): + self.with_mean = with_mean + self.with_std = with_std + self.copy = copy + + def _reset(self): + """Reset internal data-dependent state of the scaler, if necessary. + + __init__ parameters are not touched. + """ + # Checking one attribute is enough, because they are all set together + # in partial_fit + if hasattr(self, "scale_"): + del self.scale_ + del self.n_samples_seen_ + del self.mean_ + del self.var_ + + def fit(self, X, y=None, sample_weight=None): + """Compute the mean and std to be used for later scaling. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to compute the mean and standard deviation + used for later scaling along the features axis. + + y : None + Ignored. + + sample_weight : array-like of shape (n_samples,), default=None + Individual weights for each sample. + + .. versionadded:: 0.24 + parameter *sample_weight* support to StandardScaler. + + Returns + ------- + self : object + Fitted scaler. + """ + # Reset internal state before fitting + self._reset() + return self.partial_fit(X, y, sample_weight) + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None, sample_weight=None): + """Online computation of mean and std on X for later scaling. + + All of X is processed as a single batch. This is intended for cases + when :meth:`fit` is not feasible due to very large number of + `n_samples` or because X is read from a continuous stream. + + The algorithm for incremental mean and std is given in Equation 1.5a,b + in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms + for computing the sample variance: Analysis and recommendations." + The American Statistician 37.3 (1983): 242-247: + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to compute the mean and standard deviation + used for later scaling along the features axis. + + y : None + Ignored. + + sample_weight : array-like of shape (n_samples,), default=None + Individual weights for each sample. + + .. versionadded:: 0.24 + parameter *sample_weight* support to StandardScaler. + + Returns + ------- + self : object + Fitted scaler. + """ + first_call = not hasattr(self, "n_samples_seen_") + X = self._validate_data( + X, + accept_sparse=("csr", "csc"), + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + reset=first_call, + ) + n_features = X.shape[1] + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + # Even in the case of `with_mean=False`, we update the mean anyway + # This is needed for the incremental computation of the var + # See incr_mean_variance_axis and _incremental_mean_variance_axis + + # if n_samples_seen_ is an integer (i.e. 
no missing values), we need to + # transform it to a NumPy array of shape (n_features,) required by + # incr_mean_variance_axis and _incremental_variance_axis + dtype = np.int64 if sample_weight is None else X.dtype + if not hasattr(self, "n_samples_seen_"): + self.n_samples_seen_ = np.zeros(n_features, dtype=dtype) + elif np.size(self.n_samples_seen_) == 1: + self.n_samples_seen_ = np.repeat(self.n_samples_seen_, X.shape[1]) + self.n_samples_seen_ = self.n_samples_seen_.astype(dtype, copy=False) + + if sparse.issparse(X): + if self.with_mean: + raise ValueError( + "Cannot center sparse matrices: pass `with_mean=False` " + "instead. See docstring for motivation and alternatives." + ) + sparse_constructor = ( + sparse.csr_matrix if X.format == "csr" else sparse.csc_matrix + ) + + if self.with_std: + # First pass + if not hasattr(self, "scale_"): + self.mean_, self.var_, self.n_samples_seen_ = mean_variance_axis( + X, axis=0, weights=sample_weight, return_sum_weights=True + ) + # Next passes + else: + ( + self.mean_, + self.var_, + self.n_samples_seen_, + ) = incr_mean_variance_axis( + X, + axis=0, + last_mean=self.mean_, + last_var=self.var_, + last_n=self.n_samples_seen_, + weights=sample_weight, + ) + # We force the mean and variance to float64 for large arrays + # See https://github.com/scikit-learn/scikit-learn/pull/12338 + self.mean_ = self.mean_.astype(np.float64, copy=False) + self.var_ = self.var_.astype(np.float64, copy=False) + else: + self.mean_ = None # as with_mean must be False for sparse + self.var_ = None + weights = _check_sample_weight(sample_weight, X) + sum_weights_nan = weights @ sparse_constructor( + (np.isnan(X.data), X.indices, X.indptr), shape=X.shape + ) + self.n_samples_seen_ += (np.sum(weights) - sum_weights_nan).astype( + dtype + ) + else: + # First pass + if not hasattr(self, "scale_"): + self.mean_ = 0.0 + if self.with_std: + self.var_ = 0.0 + else: + self.var_ = None + + if not self.with_mean and not self.with_std: + self.mean_ = None + self.var_ = None + self.n_samples_seen_ += X.shape[0] - np.isnan(X).sum(axis=0) + + else: + self.mean_, self.var_, self.n_samples_seen_ = _incremental_mean_and_var( + X, + self.mean_, + self.var_, + self.n_samples_seen_, + sample_weight=sample_weight, + ) + + # for backward-compatibility, reduce n_samples_seen_ to an integer + # if the number of samples is the same for each feature (i.e. no + # missing values) + if np.ptp(self.n_samples_seen_) == 0: + self.n_samples_seen_ = self.n_samples_seen_[0] + + if self.with_std: + # Extract the list of near constant features on the raw variances, + # before taking the square root. + constant_mask = _is_constant_feature( + self.var_, self.mean_, self.n_samples_seen_ + ) + self.scale_ = _handle_zeros_in_scale( + np.sqrt(self.var_), copy=False, constant_mask=constant_mask + ) + else: + self.scale_ = None + + return self + + def transform(self, X, copy=None): + """Perform standardization by centering and scaling. + + Parameters + ---------- + X : {array-like, sparse matrix of shape (n_samples, n_features) + The data used to scale along the features axis. + copy : bool, default=None + Copy the input X or not. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + Transformed array. 
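# ---------------------------------------------------------------------------
# Editor's sketch (not part of the scikit-learn diff above): a textbook form of
# the pairwise mean/variance merge cited in partial_fit (Chan, Golub & LeVeque,
# Eq. 1.5a/b). The helper name `combine` is illustrative; the module's own
# _incremental_mean_and_var additionally handles NaNs and sample weights.
import numpy as np

def combine(n_a, mean_a, m2_a, batch):
    """Merge a running (count, mean, sum of squared deviations) with a batch."""
    n_b = batch.shape[0]
    mean_b = batch.mean(axis=0)
    m2_b = ((batch - mean_b) ** 2).sum(axis=0)
    n = n_a + n_b
    delta = mean_b - mean_a
    mean = mean_a + delta * n_b / n
    m2 = m2_a + m2_b + delta**2 * n_a * n_b / n
    return n, mean, m2

rng = np.random.default_rng(0)
X1, X2 = rng.normal(size=(40, 3)), rng.normal(size=(60, 3))
n, mean, m2 = combine(*combine(0, 0.0, 0.0, X1), X2)
X_all = np.vstack([X1, X2])
assert np.allclose(mean, X_all.mean(axis=0))
assert np.allclose(m2 / n, X_all.var(axis=0))  # biased variance, ddof=0
# ---------------------------------------------------------------------------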
+ """ + check_is_fitted(self) + + copy = copy if copy is not None else self.copy + X = self._validate_data( + X, + reset=False, + accept_sparse="csr", + copy=copy, + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + ) + + if sparse.issparse(X): + if self.with_mean: + raise ValueError( + "Cannot center sparse matrices: pass `with_mean=False` " + "instead. See docstring for motivation and alternatives." + ) + if self.scale_ is not None: + inplace_column_scale(X, 1 / self.scale_) + else: + if self.with_mean: + X -= self.mean_ + if self.with_std: + X /= self.scale_ + return X + + def inverse_transform(self, X, copy=None): + """Scale back the data to the original representation. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to scale along the features axis. + copy : bool, default=None + Copy the input X or not. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + Transformed array. + """ + check_is_fitted(self) + + copy = copy if copy is not None else self.copy + X = check_array( + X, + accept_sparse="csr", + copy=copy, + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + ) + + if sparse.issparse(X): + if self.with_mean: + raise ValueError( + "Cannot uncenter sparse matrices: pass `with_mean=False` " + "instead See docstring for motivation and alternatives." + ) + if self.scale_ is not None: + inplace_column_scale(X, self.scale_) + else: + if self.with_std: + X *= self.scale_ + if self.with_mean: + X += self.mean_ + return X + + def _more_tags(self): + return {"allow_nan": True, "preserves_dtype": [np.float64, np.float32]} + + +class MaxAbsScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): + """Scale each feature by its maximum absolute value. + + This estimator scales and translates each feature individually such + that the maximal absolute value of each feature in the + training set will be 1.0. It does not shift/center the data, and + thus does not destroy any sparsity. + + This scaler can also be applied to sparse CSR or CSC matrices. + + `MaxAbsScaler` doesn't reduce the effect of outliers; it only linearly + scales them down. For an example visualization, refer to :ref:`Compare + MaxAbsScaler with other scalers `. + + .. versionadded:: 0.17 + + Parameters + ---------- + copy : bool, default=True + Set to False to perform inplace scaling and avoid a copy (if the input + is already a numpy array). + + Attributes + ---------- + scale_ : ndarray of shape (n_features,) + Per feature relative scaling of the data. + + .. versionadded:: 0.17 + *scale_* attribute. + + max_abs_ : ndarray of shape (n_features,) + Per feature maximum absolute value. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_seen_ : int + The number of samples processed by the estimator. Will be reset on + new calls to fit, but increments across ``partial_fit`` calls. + + See Also + -------- + maxabs_scale : Equivalent function without the estimator API. + + Notes + ----- + NaNs are treated as missing values: disregarded in fit, and maintained in + transform. + + Examples + -------- + >>> from sklearn.preprocessing import MaxAbsScaler + >>> X = [[ 1., -1., 2.], + ... [ 2., 0., 0.], + ... 
[ 0., 1., -1.]] + >>> transformer = MaxAbsScaler().fit(X) + >>> transformer + MaxAbsScaler() + >>> transformer.transform(X) + array([[ 0.5, -1. , 1. ], + [ 1. , 0. , 0. ], + [ 0. , 1. , -0.5]]) + """ + + _parameter_constraints: dict = {"copy": ["boolean"]} + + def __init__(self, *, copy=True): + self.copy = copy + + def _reset(self): + """Reset internal data-dependent state of the scaler, if necessary. + + __init__ parameters are not touched. + """ + # Checking one attribute is enough, because they are all set together + # in partial_fit + if hasattr(self, "scale_"): + del self.scale_ + del self.n_samples_seen_ + del self.max_abs_ + + def fit(self, X, y=None): + """Compute the maximum absolute value to be used for later scaling. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to compute the per-feature minimum and maximum + used for later scaling along the features axis. + + y : None + Ignored. + + Returns + ------- + self : object + Fitted scaler. + """ + # Reset internal state before fitting + self._reset() + return self.partial_fit(X, y) + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None): + """Online computation of max absolute value of X for later scaling. + + All of X is processed as a single batch. This is intended for cases + when :meth:`fit` is not feasible due to very large number of + `n_samples` or because X is read from a continuous stream. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to compute the mean and standard deviation + used for later scaling along the features axis. + + y : None + Ignored. + + Returns + ------- + self : object + Fitted scaler. + """ + xp, _ = get_namespace(X) + + first_pass = not hasattr(self, "n_samples_seen_") + X = self._validate_data( + X, + reset=first_pass, + accept_sparse=("csr", "csc"), + dtype=_array_api.supported_float_dtypes(xp), + force_all_finite="allow-nan", + ) + + if sparse.issparse(X): + mins, maxs = min_max_axis(X, axis=0, ignore_nan=True) + max_abs = np.maximum(np.abs(mins), np.abs(maxs)) + else: + max_abs = _array_api._nanmax(xp.abs(X), axis=0) + + if first_pass: + self.n_samples_seen_ = X.shape[0] + else: + max_abs = xp.maximum(self.max_abs_, max_abs) + self.n_samples_seen_ += X.shape[0] + + self.max_abs_ = max_abs + self.scale_ = _handle_zeros_in_scale(max_abs, copy=True) + return self + + def transform(self, X): + """Scale the data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data that should be scaled. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + Transformed array. + """ + check_is_fitted(self) + + xp, _ = get_namespace(X) + + X = self._validate_data( + X, + accept_sparse=("csr", "csc"), + copy=self.copy, + reset=False, + dtype=_array_api.supported_float_dtypes(xp), + force_all_finite="allow-nan", + ) + + if sparse.issparse(X): + inplace_column_scale(X, 1.0 / self.scale_) + else: + X /= self.scale_ + return X + + def inverse_transform(self, X): + """Scale back the data to the original representation. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data that should be transformed back. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + Transformed array. 
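# ---------------------------------------------------------------------------
# Editor's sketch (not part of the scikit-learn diff above): for dense input the
# statistics fitted by MaxAbsScaler reduce to a per-column maximum of absolute
# values, and inverse_transform simply multiplies the scale back in. Assumes no
# all-zero column (those are handled by _handle_zeros_in_scale in the module).
import numpy as np
from sklearn.preprocessing import MaxAbsScaler

X = np.array([[1.0, -1.0, 2.0],
              [2.0, 0.0, 0.0],
              [0.0, 1.0, -1.0]])

scale = np.nanmax(np.abs(X), axis=0)        # max_abs_ / scale_ per feature
assert np.allclose(X / scale, MaxAbsScaler().fit_transform(X))
assert np.allclose((X / scale) * scale, X)  # inverse_transform undoes the scaling
# ---------------------------------------------------------------------------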
+ """ + check_is_fitted(self) + + xp, _ = get_namespace(X) + + X = check_array( + X, + accept_sparse=("csr", "csc"), + copy=self.copy, + dtype=_array_api.supported_float_dtypes(xp), + force_all_finite="allow-nan", + ) + + if sparse.issparse(X): + inplace_column_scale(X, self.scale_) + else: + X *= self.scale_ + return X + + def _more_tags(self): + return {"allow_nan": True} + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "axis": [Options(Integral, {0, 1})], + }, + prefer_skip_nested_validation=False, +) +def maxabs_scale(X, *, axis=0, copy=True): + """Scale each feature to the [-1, 1] range without breaking the sparsity. + + This estimator scales each feature individually such + that the maximal absolute value of each feature in the + training set will be 1.0. + + This scaler can also be applied to sparse CSR or CSC matrices. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data. + + axis : {0, 1}, default=0 + Axis used to scale along. If 0, independently scale each feature, + otherwise (if 1) scale each sample. + + copy : bool, default=True + If False, try to avoid a copy and scale in place. + This is not guaranteed to always work in place; e.g. if the data is + a numpy array with an int dtype, a copy will be returned even with + copy=False. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + The transformed data. + + .. warning:: Risk of data leak + + Do not use :func:`~sklearn.preprocessing.maxabs_scale` unless you know + what you are doing. A common mistake is to apply it to the entire data + *before* splitting into training and test sets. This will bias the + model evaluation because information would have leaked from the test + set to the training set. + In general, we recommend using + :class:`~sklearn.preprocessing.MaxAbsScaler` within a + :ref:`Pipeline ` in order to prevent most risks of data + leaking: `pipe = make_pipeline(MaxAbsScaler(), LogisticRegression())`. + + See Also + -------- + MaxAbsScaler : Performs scaling to the [-1, 1] range using + the Transformer API (e.g. as part of a preprocessing + :class:`~sklearn.pipeline.Pipeline`). + + Notes + ----- + NaNs are treated as missing values: disregarded to compute the statistics, + and maintained during the data transformation. + + For a comparison of the different scalers, transformers, and normalizers, + see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. + + Examples + -------- + >>> from sklearn.preprocessing import maxabs_scale + >>> X = [[-2, 1, 2], [-1, 0, 1]] + >>> maxabs_scale(X, axis=0) # scale each column independently + array([[-1. , 1. , 1. ], + [-0.5, 0. , 0.5]]) + >>> maxabs_scale(X, axis=1) # scale each row independently + array([[-1. , 0.5, 1. ], + [-1. , 0. , 1. ]]) + """ + # Unlike the scaler object, this function allows 1d input. + + # If copy is required, it will be done inside the scaler object. + X = check_array( + X, + accept_sparse=("csr", "csc"), + copy=False, + ensure_2d=False, + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + ) + original_ndim = X.ndim + + if original_ndim == 1: + X = X.reshape(X.shape[0], 1) + + s = MaxAbsScaler(copy=copy) + if axis == 0: + X = s.fit_transform(X) + else: + X = s.fit_transform(X.T).T + + if original_ndim == 1: + X = X.ravel() + + return X + + +class RobustScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): + """Scale features using statistics that are robust to outliers. 
+ + This Scaler removes the median and scales the data according to + the quantile range (defaults to IQR: Interquartile Range). + The IQR is the range between the 1st quartile (25th quantile) + and the 3rd quartile (75th quantile). + + Centering and scaling happen independently on each feature by + computing the relevant statistics on the samples in the training + set. Median and interquartile range are then stored to be used on + later data using the :meth:`transform` method. + + Standardization of a dataset is a common preprocessing for many machine + learning estimators. Typically this is done by removing the mean and + scaling to unit variance. However, outliers can often influence the sample + mean / variance in a negative way. In such cases, using the median and the + interquartile range often give better results. For an example visualization + and comparison to other scalers, refer to :ref:`Compare RobustScaler with + other scalers `. + + .. versionadded:: 0.17 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + with_centering : bool, default=True + If `True`, center the data before scaling. + This will cause :meth:`transform` to raise an exception when attempted + on sparse matrices, because centering them entails building a dense + matrix which in common use cases is likely to be too large to fit in + memory. + + with_scaling : bool, default=True + If `True`, scale the data to interquartile range. + + quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0, \ + default=(25.0, 75.0) + Quantile range used to calculate `scale_`. By default this is equal to + the IQR, i.e., `q_min` is the first quantile and `q_max` is the third + quantile. + + .. versionadded:: 0.18 + + copy : bool, default=True + If `False`, try to avoid a copy and do inplace scaling instead. + This is not guaranteed to always work inplace; e.g. if the data is + not a NumPy array or scipy.sparse CSR matrix, a copy may still be + returned. + + unit_variance : bool, default=False + If `True`, scale data so that normally distributed features have a + variance of 1. In general, if the difference between the x-values of + `q_max` and `q_min` for a standard normal distribution is greater + than 1, the dataset will be scaled down. If less than 1, the dataset + will be scaled up. + + .. versionadded:: 0.24 + + Attributes + ---------- + center_ : array of floats + The median value for each feature in the training set. + + scale_ : array of floats + The (scaled) interquartile range for each feature in the training set. + + .. versionadded:: 0.17 + *scale_* attribute. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + robust_scale : Equivalent function without the estimator API. + sklearn.decomposition.PCA : Further removes the linear correlation across + features with 'whiten=True'. + + Notes + ----- + + https://en.wikipedia.org/wiki/Median + https://en.wikipedia.org/wiki/Interquartile_range + + Examples + -------- + >>> from sklearn.preprocessing import RobustScaler + >>> X = [[ 1., -2., 2.], + ... [ -2., 1., 3.], + ... [ 4., 1., -2.]] + >>> transformer = RobustScaler().fit(X) + >>> transformer + RobustScaler() + >>> transformer.transform(X) + array([[ 0. , -2. , 0. ], + [-1. , 0. , 0.4], + [ 1. , 0. 
, -1.6]]) + """ + + _parameter_constraints: dict = { + "with_centering": ["boolean"], + "with_scaling": ["boolean"], + "quantile_range": [tuple], + "copy": ["boolean"], + "unit_variance": ["boolean"], + } + + def __init__( + self, + *, + with_centering=True, + with_scaling=True, + quantile_range=(25.0, 75.0), + copy=True, + unit_variance=False, + ): + self.with_centering = with_centering + self.with_scaling = with_scaling + self.quantile_range = quantile_range + self.unit_variance = unit_variance + self.copy = copy + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Compute the median and quantiles to be used for scaling. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to compute the median and quantiles + used for later scaling along the features axis. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Fitted scaler. + """ + # at fit, convert sparse matrices to csc for optimized computation of + # the quantiles + X = self._validate_data( + X, + accept_sparse="csc", + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + ) + + q_min, q_max = self.quantile_range + if not 0 <= q_min <= q_max <= 100: + raise ValueError("Invalid quantile range: %s" % str(self.quantile_range)) + + if self.with_centering: + if sparse.issparse(X): + raise ValueError( + "Cannot center sparse matrices: use `with_centering=False`" + " instead. See docstring for motivation and alternatives." + ) + self.center_ = np.nanmedian(X, axis=0) + else: + self.center_ = None + + if self.with_scaling: + quantiles = [] + for feature_idx in range(X.shape[1]): + if sparse.issparse(X): + column_nnz_data = X.data[ + X.indptr[feature_idx] : X.indptr[feature_idx + 1] + ] + column_data = np.zeros(shape=X.shape[0], dtype=X.dtype) + column_data[: len(column_nnz_data)] = column_nnz_data + else: + column_data = X[:, feature_idx] + + quantiles.append(np.nanpercentile(column_data, self.quantile_range)) + + quantiles = np.transpose(quantiles) + + self.scale_ = quantiles[1] - quantiles[0] + self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False) + if self.unit_variance: + adjust = stats.norm.ppf(q_max / 100.0) - stats.norm.ppf(q_min / 100.0) + self.scale_ = self.scale_ / adjust + else: + self.scale_ = None + + return self + + def transform(self, X): + """Center and scale the data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to scale along the specified axis. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + Transformed array. + """ + check_is_fitted(self) + X = self._validate_data( + X, + accept_sparse=("csr", "csc"), + copy=self.copy, + dtype=FLOAT_DTYPES, + reset=False, + force_all_finite="allow-nan", + ) + + if sparse.issparse(X): + if self.with_scaling: + inplace_column_scale(X, 1.0 / self.scale_) + else: + if self.with_centering: + X -= self.center_ + if self.with_scaling: + X /= self.scale_ + return X + + def inverse_transform(self, X): + """Scale back the data to the original representation. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The rescaled data to be transformed back. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + Transformed array. 
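# ---------------------------------------------------------------------------
# Editor's sketch (not part of the scikit-learn diff above): the statistics
# fitted above, written out with NumPy/SciPy for dense input and the default
# quantile_range=(25.0, 75.0). Data reused from the class docstring example.
import numpy as np
from scipy import stats
from sklearn.preprocessing import RobustScaler

X = np.array([[1.0, -2.0, 2.0],
              [-2.0, 1.0, 3.0],
              [4.0, 1.0, -2.0]])

center = np.nanmedian(X, axis=0)                       # center_
q25, q75 = np.nanpercentile(X, [25.0, 75.0], axis=0)
scale = q75 - q25                                      # scale_ (per-feature IQR)
assert np.allclose((X - center) / scale, RobustScaler().fit_transform(X))

# With unit_variance=True the IQR is divided by the same quantile span of a
# standard normal, so roughly Gaussian features end up with unit variance.
adjust = stats.norm.ppf(0.75) - stats.norm.ppf(0.25)
assert np.allclose(RobustScaler(unit_variance=True).fit(X).scale_, scale / adjust)
# ---------------------------------------------------------------------------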
+ """ + check_is_fitted(self) + X = check_array( + X, + accept_sparse=("csr", "csc"), + copy=self.copy, + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + ) + + if sparse.issparse(X): + if self.with_scaling: + inplace_column_scale(X, self.scale_) + else: + if self.with_scaling: + X *= self.scale_ + if self.with_centering: + X += self.center_ + return X + + def _more_tags(self): + return {"allow_nan": True} + + +@validate_params( + {"X": ["array-like", "sparse matrix"], "axis": [Options(Integral, {0, 1})]}, + prefer_skip_nested_validation=False, +) +def robust_scale( + X, + *, + axis=0, + with_centering=True, + with_scaling=True, + quantile_range=(25.0, 75.0), + copy=True, + unit_variance=False, +): + """Standardize a dataset along any axis. + + Center to the median and component wise scale + according to the interquartile range. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_sample, n_features) + The data to center and scale. + + axis : int, default=0 + Axis used to compute the medians and IQR along. If 0, + independently scale each feature, otherwise (if 1) scale + each sample. + + with_centering : bool, default=True + If `True`, center the data before scaling. + + with_scaling : bool, default=True + If `True`, scale the data to unit variance (or equivalently, + unit standard deviation). + + quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0,\ + default=(25.0, 75.0) + Quantile range used to calculate `scale_`. By default this is equal to + the IQR, i.e., `q_min` is the first quantile and `q_max` is the third + quantile. + + .. versionadded:: 0.18 + + copy : bool, default=True + If False, try to avoid a copy and scale in place. + This is not guaranteed to always work in place; e.g. if the data is + a numpy array with an int dtype, a copy will be returned even with + copy=False. + + unit_variance : bool, default=False + If `True`, scale data so that normally distributed features have a + variance of 1. In general, if the difference between the x-values of + `q_max` and `q_min` for a standard normal distribution is greater + than 1, the dataset will be scaled down. If less than 1, the dataset + will be scaled up. + + .. versionadded:: 0.24 + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + The transformed data. + + See Also + -------- + RobustScaler : Performs centering and scaling using the Transformer API + (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). + + Notes + ----- + This implementation will refuse to center scipy.sparse matrices + since it would make them non-sparse and would potentially crash the + program with memory exhaustion problems. + + Instead the caller is expected to either set explicitly + `with_centering=False` (in that case, only variance scaling will be + performed on the features of the CSR matrix) or to call `X.toarray()` + if he/she expects the materialized dense array to fit in memory. + + To avoid memory copy the caller should pass a CSR matrix. + + For a comparison of the different scalers, transformers, and normalizers, + see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. + + .. warning:: Risk of data leak + + Do not use :func:`~sklearn.preprocessing.robust_scale` unless you know + what you are doing. A common mistake is to apply it to the entire data + *before* splitting into training and test sets. 
This will bias the + model evaluation because information would have leaked from the test + set to the training set. + In general, we recommend using + :class:`~sklearn.preprocessing.RobustScaler` within a + :ref:`Pipeline ` in order to prevent most risks of data + leaking: `pipe = make_pipeline(RobustScaler(), LogisticRegression())`. + + Examples + -------- + >>> from sklearn.preprocessing import robust_scale + >>> X = [[-2, 1, 2], [-1, 0, 1]] + >>> robust_scale(X, axis=0) # scale each column independently + array([[-1., 1., 1.], + [ 1., -1., -1.]]) + >>> robust_scale(X, axis=1) # scale each row independently + array([[-1.5, 0. , 0.5], + [-1. , 0. , 1. ]]) + """ + X = check_array( + X, + accept_sparse=("csr", "csc"), + copy=False, + ensure_2d=False, + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + ) + original_ndim = X.ndim + + if original_ndim == 1: + X = X.reshape(X.shape[0], 1) + + s = RobustScaler( + with_centering=with_centering, + with_scaling=with_scaling, + quantile_range=quantile_range, + unit_variance=unit_variance, + copy=copy, + ) + if axis == 0: + X = s.fit_transform(X) + else: + X = s.fit_transform(X.T).T + + if original_ndim == 1: + X = X.ravel() + + return X + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "norm": [StrOptions({"l1", "l2", "max"})], + "axis": [Options(Integral, {0, 1})], + "copy": ["boolean"], + "return_norm": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def normalize(X, norm="l2", *, axis=1, copy=True, return_norm=False): + """Scale input vectors individually to unit norm (vector length). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to normalize, element by element. + scipy.sparse matrices should be in CSR format to avoid an + un-necessary copy. + + norm : {'l1', 'l2', 'max'}, default='l2' + The norm to use to normalize each non zero sample (or each non-zero + feature if axis is 0). + + axis : {0, 1}, default=1 + Define axis used to normalize the data along. If 1, independently + normalize each sample, otherwise (if 0) normalize each feature. + + copy : bool, default=True + If False, try to avoid a copy and normalize in place. + This is not guaranteed to always work in place; e.g. if the data is + a numpy array with an int dtype, a copy will be returned even with + copy=False. + + return_norm : bool, default=False + Whether to return the computed norms. + + Returns + ------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Normalized input X. + + norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, ) + An array of norms along given axis for X. + When X is sparse, a NotImplementedError will be raised + for norm 'l1' or 'l2'. + + See Also + -------- + Normalizer : Performs normalization using the Transformer API + (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). + + Notes + ----- + For a comparison of the different scalers, transformers, and normalizers, + see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. + + Examples + -------- + >>> from sklearn.preprocessing import normalize + >>> X = [[-2, 1, 2], [-1, 0, 1]] + >>> normalize(X, norm="l1") # L1 normalization each row independently + array([[-0.4, 0.2, 0.4], + [-0.5, 0. , 0.5]]) + >>> normalize(X, norm="l2") # L2 normalization each row independently + array([[-0.66..., 0.33..., 0.66...], + [-0.70..., 0. 
, 0.70...]]) + """ + if axis == 0: + sparse_format = "csc" + else: # axis == 1: + sparse_format = "csr" + + xp, _ = get_namespace(X) + + X = check_array( + X, + accept_sparse=sparse_format, + copy=copy, + estimator="the normalize function", + dtype=_array_api.supported_float_dtypes(xp), + ) + if axis == 0: + X = X.T + + if sparse.issparse(X): + if return_norm and norm in ("l1", "l2"): + raise NotImplementedError( + "return_norm=True is not implemented " + "for sparse matrices with norm 'l1' " + "or norm 'l2'" + ) + if norm == "l1": + inplace_csr_row_normalize_l1(X) + elif norm == "l2": + inplace_csr_row_normalize_l2(X) + elif norm == "max": + mins, maxes = min_max_axis(X, 1) + norms = np.maximum(abs(mins), maxes) + norms_elementwise = norms.repeat(np.diff(X.indptr)) + mask = norms_elementwise != 0 + X.data[mask] /= norms_elementwise[mask] + else: + if norm == "l1": + norms = xp.sum(xp.abs(X), axis=1) + elif norm == "l2": + norms = row_norms(X) + elif norm == "max": + norms = xp.max(xp.abs(X), axis=1) + norms = _handle_zeros_in_scale(norms, copy=False) + X /= norms[:, None] + + if axis == 0: + X = X.T + + if return_norm: + return X, norms + else: + return X + + +class Normalizer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): + """Normalize samples individually to unit norm. + + Each sample (i.e. each row of the data matrix) with at least one + non zero component is rescaled independently of other samples so + that its norm (l1, l2 or inf) equals one. + + This transformer is able to work both with dense numpy arrays and + scipy.sparse matrix (use CSR format if you want to avoid the burden of + a copy / conversion). + + Scaling inputs to unit norms is a common operation for text + classification or clustering for instance. For instance the dot + product of two l2-normalized TF-IDF vectors is the cosine similarity + of the vectors and is the base similarity metric for the Vector + Space Model commonly used by the Information Retrieval community. + + For an example visualization, refer to :ref:`Compare Normalizer with other + scalers `. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + norm : {'l1', 'l2', 'max'}, default='l2' + The norm to use to normalize each non zero sample. If norm='max' + is used, values will be rescaled by the maximum of the absolute + values. + + copy : bool, default=True + Set to False to perform inplace row normalization and avoid a + copy (if the input is already a numpy array or a scipy.sparse + CSR matrix). + + Attributes + ---------- + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + normalize : Equivalent function without the estimator API. + + Notes + ----- + This estimator is :term:`stateless` and does not need to be fitted. + However, we recommend to call :meth:`fit_transform` instead of + :meth:`transform`, as parameter validation is only performed in + :meth:`fit`. + + Examples + -------- + >>> from sklearn.preprocessing import Normalizer + >>> X = [[4, 1, 2, 2], + ... [1, 3, 9, 3], + ... [5, 7, 5, 1]] + >>> transformer = Normalizer().fit(X) # fit does nothing. 
+ >>> transformer + Normalizer() + >>> transformer.transform(X) + array([[0.8, 0.2, 0.4, 0.4], + [0.1, 0.3, 0.9, 0.3], + [0.5, 0.7, 0.5, 0.1]]) + """ + + _parameter_constraints: dict = { + "norm": [StrOptions({"l1", "l2", "max"})], + "copy": ["boolean"], + } + + def __init__(self, norm="l2", *, copy=True): + self.norm = norm + self.copy = copy + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Only validates estimator's parameters. + + This method allows to: (i) validate the estimator's parameters and + (ii) be consistent with the scikit-learn transformer API. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to estimate the normalization parameters. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Fitted transformer. + """ + self._validate_data(X, accept_sparse="csr") + return self + + def transform(self, X, copy=None): + """Scale each non zero row of X to unit norm. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to normalize, row by row. scipy.sparse matrices should be + in CSR format to avoid an un-necessary copy. + + copy : bool, default=None + Copy the input X or not. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + Transformed array. + """ + copy = copy if copy is not None else self.copy + X = self._validate_data(X, accept_sparse="csr", reset=False) + return normalize(X, norm=self.norm, axis=1, copy=copy) + + def _more_tags(self): + return {"stateless": True, "array_api_support": True} + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "threshold": [Interval(Real, None, None, closed="neither")], + "copy": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def binarize(X, *, threshold=0.0, copy=True): + """Boolean thresholding of array-like or scipy.sparse matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to binarize, element by element. + scipy.sparse matrices should be in CSR or CSC format to avoid an + un-necessary copy. + + threshold : float, default=0.0 + Feature values below or equal to this are replaced by 0, above it by 1. + Threshold may not be less than 0 for operations on sparse matrices. + + copy : bool, default=True + If False, try to avoid a copy and binarize in place. + This is not guaranteed to always work in place; e.g. if the data is + a numpy array with an object dtype, a copy will be returned even with + copy=False. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + The transformed data. + + See Also + -------- + Binarizer : Performs binarization using the Transformer API + (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). 
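# ---------------------------------------------------------------------------
# Editor's sketch (not part of the scikit-learn diff above): it mirrors the
# dense branch of the `normalize` function implemented earlier (axis=1, i.e.
# per row). The helper name `manual_normalize` is illustrative; zero rows are
# left unchanged, matching _handle_zeros_in_scale.
import numpy as np
from sklearn.preprocessing import normalize

X = np.array([[4.0, 1.0, 2.0, 2.0],
              [0.0, 0.0, 0.0, 0.0]])     # second row is all zeros

def manual_normalize(X, norm="l2"):
    if norm == "l1":
        norms = np.abs(X).sum(axis=1)
    elif norm == "l2":
        norms = np.sqrt((X * X).sum(axis=1))
    else:  # "max"
        norms = np.abs(X).max(axis=1)
    norms[norms == 0.0] = 1.0            # avoid dividing all-zero rows by zero
    return X / norms[:, None]

for norm in ("l1", "l2", "max"):
    assert np.allclose(manual_normalize(X, norm), normalize(X, norm=norm))
# ---------------------------------------------------------------------------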
+ + Examples + -------- + >>> from sklearn.preprocessing import binarize + >>> X = [[0.4, 0.6, 0.5], [0.6, 0.1, 0.2]] + >>> binarize(X, threshold=0.5) + array([[0., 1., 0.], + [1., 0., 0.]]) + """ + X = check_array(X, accept_sparse=["csr", "csc"], copy=copy) + if sparse.issparse(X): + if threshold < 0: + raise ValueError("Cannot binarize a sparse matrix with threshold < 0") + cond = X.data > threshold + not_cond = np.logical_not(cond) + X.data[cond] = 1 + X.data[not_cond] = 0 + X.eliminate_zeros() + else: + cond = X > threshold + not_cond = np.logical_not(cond) + X[cond] = 1 + X[not_cond] = 0 + return X + + +class Binarizer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): + """Binarize data (set feature values to 0 or 1) according to a threshold. + + Values greater than the threshold map to 1, while values less than + or equal to the threshold map to 0. With the default threshold of 0, + only positive values map to 1. + + Binarization is a common operation on text count data where the + analyst can decide to only consider the presence or absence of a + feature rather than a quantified number of occurrences for instance. + + It can also be used as a pre-processing step for estimators that + consider boolean random variables (e.g. modelled using the Bernoulli + distribution in a Bayesian setting). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + threshold : float, default=0.0 + Feature values below or equal to this are replaced by 0, above it by 1. + Threshold may not be less than 0 for operations on sparse matrices. + + copy : bool, default=True + Set to False to perform inplace binarization and avoid a copy (if + the input is already a numpy array or a scipy.sparse CSR matrix). + + Attributes + ---------- + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + binarize : Equivalent function without the estimator API. + KBinsDiscretizer : Bin continuous data into intervals. + OneHotEncoder : Encode categorical features as a one-hot numeric array. + + Notes + ----- + If the input is a sparse matrix, only the non-zero values are subject + to update by the :class:`Binarizer` class. + + This estimator is :term:`stateless` and does not need to be fitted. + However, we recommend to call :meth:`fit_transform` instead of + :meth:`transform`, as parameter validation is only performed in + :meth:`fit`. + + Examples + -------- + >>> from sklearn.preprocessing import Binarizer + >>> X = [[ 1., -1., 2.], + ... [ 2., 0., 0.], + ... [ 0., 1., -1.]] + >>> transformer = Binarizer().fit(X) # fit does nothing. + >>> transformer + Binarizer() + >>> transformer.transform(X) + array([[1., 0., 1.], + [1., 0., 0.], + [0., 1., 0.]]) + """ + + _parameter_constraints: dict = { + "threshold": [Real], + "copy": ["boolean"], + } + + def __init__(self, *, threshold=0.0, copy=True): + self.threshold = threshold + self.copy = copy + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Only validates estimator's parameters. + + This method allows to: (i) validate the estimator's parameters and + (ii) be consistent with the scikit-learn transformer API. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data. + + y : None + Ignored. 
+ + Returns + ------- + self : object + Fitted transformer. + """ + self._validate_data(X, accept_sparse="csr") + return self + + def transform(self, X, copy=None): + """Binarize each element of X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to binarize, element by element. + scipy.sparse matrices should be in CSR format to avoid an + un-necessary copy. + + copy : bool + Copy the input X or not. + + Returns + ------- + X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) + Transformed array. + """ + copy = copy if copy is not None else self.copy + # TODO: This should be refactored because binarize also calls + # check_array + X = self._validate_data(X, accept_sparse=["csr", "csc"], copy=copy, reset=False) + return binarize(X, threshold=self.threshold, copy=False) + + def _more_tags(self): + return {"stateless": True} + + +class KernelCenterer(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + r"""Center an arbitrary kernel matrix :math:`K`. + + Let define a kernel :math:`K` such that: + + .. math:: + K(X, Y) = \phi(X) . \phi(Y)^{T} + + :math:`\phi(X)` is a function mapping of rows of :math:`X` to a + Hilbert space and :math:`K` is of shape `(n_samples, n_samples)`. + + This class allows to compute :math:`\tilde{K}(X, Y)` such that: + + .. math:: + \tilde{K(X, Y)} = \tilde{\phi}(X) . \tilde{\phi}(Y)^{T} + + :math:`\tilde{\phi}(X)` is the centered mapped data in the Hilbert + space. + + `KernelCenterer` centers the features without explicitly computing the + mapping :math:`\phi(\cdot)`. Working with centered kernels is sometime + expected when dealing with algebra computation such as eigendecomposition + for :class:`~sklearn.decomposition.KernelPCA` for instance. + + Read more in the :ref:`User Guide `. + + Attributes + ---------- + K_fit_rows_ : ndarray of shape (n_samples,) + Average of each column of kernel matrix. + + K_fit_all_ : float + Average of kernel matrix. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.kernel_approximation.Nystroem : Approximate a kernel map + using a subset of the training data. + + References + ---------- + .. [1] `Schölkopf, Bernhard, Alexander Smola, and Klaus-Robert Müller. + "Nonlinear component analysis as a kernel eigenvalue problem." + Neural computation 10.5 (1998): 1299-1319. + `_ + + Examples + -------- + >>> from sklearn.preprocessing import KernelCenterer + >>> from sklearn.metrics.pairwise import pairwise_kernels + >>> X = [[ 1., -2., 2.], + ... [ -2., 1., 3.], + ... [ 4., 1., -2.]] + >>> K = pairwise_kernels(X, metric='linear') + >>> K + array([[ 9., 2., -2.], + [ 2., 14., -13.], + [ -2., -13., 21.]]) + >>> transformer = KernelCenterer().fit(K) + >>> transformer + KernelCenterer() + >>> transformer.transform(K) + array([[ 5., 0., -5.], + [ 0., 14., -14.], + [ -5., -14., 19.]]) + """ + + def __init__(self): + # Needed for backported inspect.signature compatibility with PyPy + pass + + def fit(self, K, y=None): + """Fit KernelCenterer. + + Parameters + ---------- + K : ndarray of shape (n_samples, n_samples) + Kernel matrix. + + y : None + Ignored. + + Returns + ------- + self : object + Returns the instance itself. 
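# ---------------------------------------------------------------------------
# Editor's sketch (not part of the scikit-learn diff above): centering the
# implicit feature map is the usual double-centering of the kernel matrix,
#   K_centered = K - 1_n K - K 1_n + 1_n K 1_n,  with 1_n = ones((n, n)) / n,
# which matches KernelCenterer on the training kernel. Data reused from the
# class docstring example.
import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.preprocessing import KernelCenterer

X = np.array([[1.0, -2.0, 2.0],
              [-2.0, 1.0, 3.0],
              [4.0, 1.0, -2.0]])
K = pairwise_kernels(X, metric="linear")

n = K.shape[0]
one_n = np.full((n, n), 1.0 / n)
K_manual = K - one_n @ K - K @ one_n + one_n @ K @ one_n
assert np.allclose(K_manual, KernelCenterer().fit_transform(K))
# ---------------------------------------------------------------------------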
+ """ + xp, _ = get_namespace(K) + + K = self._validate_data(K, dtype=_array_api.supported_float_dtypes(xp)) + + if K.shape[0] != K.shape[1]: + raise ValueError( + "Kernel matrix must be a square matrix." + " Input is a {}x{} matrix.".format(K.shape[0], K.shape[1]) + ) + + n_samples = K.shape[0] + self.K_fit_rows_ = xp.sum(K, axis=0) / n_samples + self.K_fit_all_ = xp.sum(self.K_fit_rows_) / n_samples + return self + + def transform(self, K, copy=True): + """Center kernel matrix. + + Parameters + ---------- + K : ndarray of shape (n_samples1, n_samples2) + Kernel matrix. + + copy : bool, default=True + Set to False to perform inplace computation. + + Returns + ------- + K_new : ndarray of shape (n_samples1, n_samples2) + Returns the instance itself. + """ + check_is_fitted(self) + + xp, _ = get_namespace(K) + + K = self._validate_data( + K, copy=copy, dtype=_array_api.supported_float_dtypes(xp), reset=False + ) + + K_pred_cols = (xp.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, None] + + K -= self.K_fit_rows_ + K -= K_pred_cols + K += self.K_fit_all_ + + return K + + @property + def _n_features_out(self): + """Number of transformed output features.""" + # Used by ClassNamePrefixFeaturesOutMixin. This model preserves the + # number of input features but this is not a one-to-one mapping in the + # usual sense. Hence the choice not to use OneToOneFeatureMixin to + # implement get_feature_names_out for this class. + return self.n_features_in_ + + def _more_tags(self): + return {"pairwise": True, "array_api_support": True} + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "value": [Interval(Real, None, None, closed="neither")], + }, + prefer_skip_nested_validation=True, +) +def add_dummy_feature(X, value=1.0): + """Augment dataset with an additional dummy feature. + + This is useful for fitting an intercept term with implementations which + cannot otherwise fit it directly. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data. + + value : float + Value to use for the dummy feature. + + Returns + ------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features + 1) + Same data with dummy feature added as first column. + + Examples + -------- + >>> from sklearn.preprocessing import add_dummy_feature + >>> add_dummy_feature([[0, 1], [1, 0]]) + array([[1., 0., 1.], + [1., 1., 0.]]) + """ + X = check_array(X, accept_sparse=["csc", "csr", "coo"], dtype=FLOAT_DTYPES) + n_samples, n_features = X.shape + shape = (n_samples, n_features + 1) + if sparse.issparse(X): + if X.format == "coo": + # Shift columns to the right. + col = X.col + 1 + # Column indices of dummy feature are 0 everywhere. + col = np.concatenate((np.zeros(n_samples), col)) + # Row indices of dummy feature are 0, ..., n_samples-1. + row = np.concatenate((np.arange(n_samples), X.row)) + # Prepend the dummy feature n_samples times. + data = np.concatenate((np.full(n_samples, value), X.data)) + return sparse.coo_matrix((data, (row, col)), shape) + elif X.format == "csc": + # Shift index pointers since we need to add n_samples elements. + indptr = X.indptr + n_samples + # indptr[0] must be 0. + indptr = np.concatenate((np.array([0]), indptr)) + # Row indices of dummy feature are 0, ..., n_samples-1. + indices = np.concatenate((np.arange(n_samples), X.indices)) + # Prepend the dummy feature n_samples times. 
+ data = np.concatenate((np.full(n_samples, value), X.data)) + return sparse.csc_matrix((data, indices, indptr), shape) + else: + klass = X.__class__ + return klass(add_dummy_feature(X.tocoo(), value)) + else: + return np.hstack((np.full((n_samples, 1), value), X)) + + +class QuantileTransformer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): + """Transform features using quantiles information. + + This method transforms the features to follow a uniform or a normal + distribution. Therefore, for a given feature, this transformation tends + to spread out the most frequent values. It also reduces the impact of + (marginal) outliers: this is therefore a robust preprocessing scheme. + + The transformation is applied on each feature independently. First an + estimate of the cumulative distribution function of a feature is + used to map the original values to a uniform distribution. The obtained + values are then mapped to the desired output distribution using the + associated quantile function. Features values of new/unseen data that fall + below or above the fitted range will be mapped to the bounds of the output + distribution. Note that this transform is non-linear. It may distort linear + correlations between variables measured at the same scale but renders + variables measured at different scales more directly comparable. + + For example visualizations, refer to :ref:`Compare QuantileTransformer with + other scalers `. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.19 + + Parameters + ---------- + n_quantiles : int, default=1000 or n_samples + Number of quantiles to be computed. It corresponds to the number + of landmarks used to discretize the cumulative distribution function. + If n_quantiles is larger than the number of samples, n_quantiles is set + to the number of samples as a larger number of quantiles does not give + a better approximation of the cumulative distribution function + estimator. + + output_distribution : {'uniform', 'normal'}, default='uniform' + Marginal distribution for the transformed data. The choices are + 'uniform' (default) or 'normal'. + + ignore_implicit_zeros : bool, default=False + Only applies to sparse matrices. If True, the sparse entries of the + matrix are discarded to compute the quantile statistics. If False, + these entries are treated as zeros. + + subsample : int, default=10_000 + Maximum number of samples used to estimate the quantiles for + computational efficiency. Note that the subsampling procedure may + differ for value-identical sparse and dense matrices. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for subsampling and smoothing + noise. + Please see ``subsample`` for more details. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + copy : bool, default=True + Set to False to perform inplace transformation and avoid a copy (if the + input is already a numpy array). + + Attributes + ---------- + n_quantiles_ : int + The actual number of quantiles used to discretize the cumulative + distribution function. + + quantiles_ : ndarray of shape (n_quantiles, n_features) + The values corresponding the quantiles of reference. + + references_ : ndarray of shape (n_quantiles, ) + Quantiles of references. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. 
Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + quantile_transform : Equivalent function without the estimator API. + PowerTransformer : Perform mapping to a normal distribution using a power + transform. + StandardScaler : Perform standardization that is faster, but less robust + to outliers. + RobustScaler : Perform robust standardization that removes the influence + of outliers but does not put outliers and inliers on the same scale. + + Notes + ----- + NaNs are treated as missing values: disregarded in fit, and maintained in + transform. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.preprocessing import QuantileTransformer + >>> rng = np.random.RandomState(0) + >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0) + >>> qt = QuantileTransformer(n_quantiles=10, random_state=0) + >>> qt.fit_transform(X) + array([...]) + """ + + _parameter_constraints: dict = { + "n_quantiles": [Interval(Integral, 1, None, closed="left")], + "output_distribution": [StrOptions({"uniform", "normal"})], + "ignore_implicit_zeros": ["boolean"], + "subsample": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + "copy": ["boolean"], + } + + def __init__( + self, + *, + n_quantiles=1000, + output_distribution="uniform", + ignore_implicit_zeros=False, + subsample=10_000, + random_state=None, + copy=True, + ): + self.n_quantiles = n_quantiles + self.output_distribution = output_distribution + self.ignore_implicit_zeros = ignore_implicit_zeros + self.subsample = subsample + self.random_state = random_state + self.copy = copy + + def _dense_fit(self, X, random_state): + """Compute percentiles for dense matrices. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The data used to scale along the features axis. + """ + if self.ignore_implicit_zeros: + warnings.warn( + "'ignore_implicit_zeros' takes effect only with" + " sparse matrix. This parameter has no effect." + ) + + n_samples, n_features = X.shape + references = self.references_ * 100 + + self.quantiles_ = [] + for col in X.T: + if self.subsample < n_samples: + subsample_idx = random_state.choice( + n_samples, size=self.subsample, replace=False + ) + col = col.take(subsample_idx, mode="clip") + self.quantiles_.append(np.nanpercentile(col, references)) + self.quantiles_ = np.transpose(self.quantiles_) + # Due to floating-point precision error in `np.nanpercentile`, + # make sure that quantiles are monotonically increasing. + # Upstream issue in numpy: + # https://github.com/numpy/numpy/issues/14685 + self.quantiles_ = np.maximum.accumulate(self.quantiles_) + + def _sparse_fit(self, X, random_state): + """Compute percentiles for sparse matrices. + + Parameters + ---------- + X : sparse matrix of shape (n_samples, n_features) + The data used to scale along the features axis. The sparse matrix + needs to be nonnegative. If a sparse matrix is provided, + it will be converted into a sparse ``csc_matrix``. 
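# ---------------------------------------------------------------------------
# Editor's sketch (not part of the scikit-learn diff above): the dense fitting
# step boils down to a linspace of reference probabilities on [0, 1] plus the
# matching per-feature percentiles, made monotonic to guard against the
# floating-point issue referenced above. Assumes n_samples <= subsample so no
# subsampling occurs.
import numpy as np
from sklearn.preprocessing import QuantileTransformer

rng = np.random.RandomState(0)
X = rng.normal(loc=0.5, scale=0.25, size=(25, 1))

n_quantiles = 10
references = np.linspace(0, 1, n_quantiles, endpoint=True)
quantiles = np.maximum.accumulate(np.nanpercentile(X, references * 100, axis=0))

qt = QuantileTransformer(n_quantiles=n_quantiles, random_state=0).fit(X)
assert np.allclose(references, qt.references_)
assert np.allclose(quantiles, qt.quantiles_)
# ---------------------------------------------------------------------------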
+ """ + n_samples, n_features = X.shape + references = self.references_ * 100 + + self.quantiles_ = [] + for feature_idx in range(n_features): + column_nnz_data = X.data[X.indptr[feature_idx] : X.indptr[feature_idx + 1]] + if len(column_nnz_data) > self.subsample: + column_subsample = self.subsample * len(column_nnz_data) // n_samples + if self.ignore_implicit_zeros: + column_data = np.zeros(shape=column_subsample, dtype=X.dtype) + else: + column_data = np.zeros(shape=self.subsample, dtype=X.dtype) + column_data[:column_subsample] = random_state.choice( + column_nnz_data, size=column_subsample, replace=False + ) + else: + if self.ignore_implicit_zeros: + column_data = np.zeros(shape=len(column_nnz_data), dtype=X.dtype) + else: + column_data = np.zeros(shape=n_samples, dtype=X.dtype) + column_data[: len(column_nnz_data)] = column_nnz_data + + if not column_data.size: + # if no nnz, an error will be raised for computing the + # quantiles. Force the quantiles to be zeros. + self.quantiles_.append([0] * len(references)) + else: + self.quantiles_.append(np.nanpercentile(column_data, references)) + self.quantiles_ = np.transpose(self.quantiles_) + # due to floating-point precision error in `np.nanpercentile`, + # make sure the quantiles are monotonically increasing + # Upstream issue in numpy: + # https://github.com/numpy/numpy/issues/14685 + self.quantiles_ = np.maximum.accumulate(self.quantiles_) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Compute the quantiles used for transforming. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to scale along the features axis. If a sparse + matrix is provided, it will be converted into a sparse + ``csc_matrix``. Additionally, the sparse matrix needs to be + nonnegative if `ignore_implicit_zeros` is False. + + y : None + Ignored. + + Returns + ------- + self : object + Fitted transformer. + """ + if self.n_quantiles > self.subsample: + raise ValueError( + "The number of quantiles cannot be greater than" + " the number of samples used. Got {} quantiles" + " and {} samples.".format(self.n_quantiles, self.subsample) + ) + + X = self._check_inputs(X, in_fit=True, copy=False) + n_samples = X.shape[0] + + if self.n_quantiles > n_samples: + warnings.warn( + "n_quantiles (%s) is greater than the total number " + "of samples (%s). n_quantiles is set to " + "n_samples." 
% (self.n_quantiles, n_samples) + ) + self.n_quantiles_ = max(1, min(self.n_quantiles, n_samples)) + + rng = check_random_state(self.random_state) + + # Create the quantiles of reference + self.references_ = np.linspace(0, 1, self.n_quantiles_, endpoint=True) + if sparse.issparse(X): + self._sparse_fit(X, rng) + else: + self._dense_fit(X, rng) + + return self + + def _transform_col(self, X_col, quantiles, inverse): + """Private function to transform a single feature.""" + + output_distribution = self.output_distribution + + if not inverse: + lower_bound_x = quantiles[0] + upper_bound_x = quantiles[-1] + lower_bound_y = 0 + upper_bound_y = 1 + else: + lower_bound_x = 0 + upper_bound_x = 1 + lower_bound_y = quantiles[0] + upper_bound_y = quantiles[-1] + # for inverse transform, match a uniform distribution + with np.errstate(invalid="ignore"): # hide NaN comparison warnings + if output_distribution == "normal": + X_col = stats.norm.cdf(X_col) + # else output distribution is already a uniform distribution + + # find index for lower and higher bounds + with np.errstate(invalid="ignore"): # hide NaN comparison warnings + if output_distribution == "normal": + lower_bounds_idx = X_col - BOUNDS_THRESHOLD < lower_bound_x + upper_bounds_idx = X_col + BOUNDS_THRESHOLD > upper_bound_x + if output_distribution == "uniform": + lower_bounds_idx = X_col == lower_bound_x + upper_bounds_idx = X_col == upper_bound_x + + isfinite_mask = ~np.isnan(X_col) + X_col_finite = X_col[isfinite_mask] + if not inverse: + # Interpolate in one direction and in the other and take the + # mean. This is in case of repeated values in the features + # and hence repeated quantiles + # + # If we don't do this, only one extreme of the duplicated is + # used (the upper when we do ascending, and the + # lower for descending). We take the mean of these two + X_col[isfinite_mask] = 0.5 * ( + np.interp(X_col_finite, quantiles, self.references_) + - np.interp(-X_col_finite, -quantiles[::-1], -self.references_[::-1]) + ) + else: + X_col[isfinite_mask] = np.interp(X_col_finite, self.references_, quantiles) + + X_col[upper_bounds_idx] = upper_bound_y + X_col[lower_bounds_idx] = lower_bound_y + # for forward transform, match the output distribution + if not inverse: + with np.errstate(invalid="ignore"): # hide NaN comparison warnings + if output_distribution == "normal": + X_col = stats.norm.ppf(X_col) + # find the value to clip the data to avoid mapping to + # infinity. Clip such that the inverse transform will be + # consistent + clip_min = stats.norm.ppf(BOUNDS_THRESHOLD - np.spacing(1)) + clip_max = stats.norm.ppf(1 - (BOUNDS_THRESHOLD - np.spacing(1))) + X_col = np.clip(X_col, clip_min, clip_max) + # else output distribution is uniform and the ppf is the + # identity function so we let X_col unchanged + + return X_col + + def _check_inputs(self, X, in_fit, accept_sparse_negative=False, copy=False): + """Check inputs before fit and transform.""" + X = self._validate_data( + X, + reset=in_fit, + accept_sparse="csc", + copy=copy, + dtype=FLOAT_DTYPES, + force_all_finite="allow-nan", + ) + # we only accept positive sparse matrix when ignore_implicit_zeros is + # false and that we call fit or transform. + with np.errstate(invalid="ignore"): # hide NaN comparison warnings + if ( + not accept_sparse_negative + and not self.ignore_implicit_zeros + and (sparse.issparse(X) and np.any(X.data < 0)) + ): + raise ValueError( + "QuantileTransformer only accepts non-negative sparse matrices." 
+ ) + + return X + + def _transform(self, X, inverse=False): + """Forward and inverse transform. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The data used to scale along the features axis. + + inverse : bool, default=False + If False, apply forward transform. If True, apply + inverse transform. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + Projected data. + """ + if sparse.issparse(X): + for feature_idx in range(X.shape[1]): + column_slice = slice(X.indptr[feature_idx], X.indptr[feature_idx + 1]) + X.data[column_slice] = self._transform_col( + X.data[column_slice], self.quantiles_[:, feature_idx], inverse + ) + else: + for feature_idx in range(X.shape[1]): + X[:, feature_idx] = self._transform_col( + X[:, feature_idx], self.quantiles_[:, feature_idx], inverse + ) + + return X + + def transform(self, X): + """Feature-wise transformation of the data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to scale along the features axis. If a sparse + matrix is provided, it will be converted into a sparse + ``csc_matrix``. Additionally, the sparse matrix needs to be + nonnegative if `ignore_implicit_zeros` is False. + + Returns + ------- + Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) + The projected data. + """ + check_is_fitted(self) + X = self._check_inputs(X, in_fit=False, copy=self.copy) + + return self._transform(X, inverse=False) + + def inverse_transform(self, X): + """Back-projection to the original space. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data used to scale along the features axis. If a sparse + matrix is provided, it will be converted into a sparse + ``csc_matrix``. Additionally, the sparse matrix needs to be + nonnegative if `ignore_implicit_zeros` is False. + + Returns + ------- + Xt : {ndarray, sparse matrix} of (n_samples, n_features) + The projected data. + """ + check_is_fitted(self) + X = self._check_inputs( + X, in_fit=False, accept_sparse_negative=True, copy=self.copy + ) + + return self._transform(X, inverse=True) + + def _more_tags(self): + return {"allow_nan": True} + + +@validate_params( + {"X": ["array-like", "sparse matrix"], "axis": [Options(Integral, {0, 1})]}, + prefer_skip_nested_validation=False, +) +def quantile_transform( + X, + *, + axis=0, + n_quantiles=1000, + output_distribution="uniform", + ignore_implicit_zeros=False, + subsample=int(1e5), + random_state=None, + copy=True, +): + """Transform features using quantiles information. + + This method transforms the features to follow a uniform or a normal + distribution. Therefore, for a given feature, this transformation tends + to spread out the most frequent values. It also reduces the impact of + (marginal) outliers: this is therefore a robust preprocessing scheme. + + The transformation is applied on each feature independently. First an + estimate of the cumulative distribution function of a feature is + used to map the original values to a uniform distribution. The obtained + values are then mapped to the desired output distribution using the + associated quantile function. Features values of new/unseen data that fall + below or above the fitted range will be mapped to the bounds of the output + distribution. Note that this transform is non-linear. It may distort linear + correlations between variables measured at the same scale but renders + variables measured at different scales more directly comparable. 
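# --- Editorial sketch (not part of the diffed file) ---------------------------
# Simplified illustration of the mechanics described above (and implemented in
# `_dense_fit` / `_transform_col`): estimate the empirical CDF of one feature
# with percentile landmarks, then map values onto [0, 1] by interpolation. The
# real transformer additionally handles NaNs, ties, sparse input, subsampling
# and the 'normal' output distribution; this is only a conceptual sketch.
import numpy as np

def uniform_quantile_map(column, n_quantiles=10):
    references = np.linspace(0, 1, n_quantiles)              # target quantile levels
    quantiles = np.nanpercentile(column, references * 100)   # CDF landmarks
    quantiles = np.maximum.accumulate(quantiles)             # enforce monotonicity
    return np.interp(column, quantiles, references)          # empirical CDF value

rng = np.random.RandomState(0)
x = rng.exponential(size=200)
u = uniform_quantile_map(x)
print(u.min(), u.max())                                      # ~0.0 and ~1.0
# ------------------------------------------------------------------------------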
+ + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to transform. + + axis : int, default=0 + Axis used to compute the means and standard deviations along. If 0, + transform each feature, otherwise (if 1) transform each sample. + + n_quantiles : int, default=1000 or n_samples + Number of quantiles to be computed. It corresponds to the number + of landmarks used to discretize the cumulative distribution function. + If n_quantiles is larger than the number of samples, n_quantiles is set + to the number of samples as a larger number of quantiles does not give + a better approximation of the cumulative distribution function + estimator. + + output_distribution : {'uniform', 'normal'}, default='uniform' + Marginal distribution for the transformed data. The choices are + 'uniform' (default) or 'normal'. + + ignore_implicit_zeros : bool, default=False + Only applies to sparse matrices. If True, the sparse entries of the + matrix are discarded to compute the quantile statistics. If False, + these entries are treated as zeros. + + subsample : int, default=1e5 + Maximum number of samples used to estimate the quantiles for + computational efficiency. Note that the subsampling procedure may + differ for value-identical sparse and dense matrices. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for subsampling and smoothing + noise. + Please see ``subsample`` for more details. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + copy : bool, default=True + If False, try to avoid a copy and transform in place. + This is not guaranteed to always work in place; e.g. if the data is + a numpy array with an int dtype, a copy will be returned even with + copy=False. + + .. versionchanged:: 0.23 + The default value of `copy` changed from False to True in 0.23. + + Returns + ------- + Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) + The transformed data. + + See Also + -------- + QuantileTransformer : Performs quantile-based scaling using the + Transformer API (e.g. as part of a preprocessing + :class:`~sklearn.pipeline.Pipeline`). + power_transform : Maps data to a normal distribution using a + power transformation. + scale : Performs standardization that is faster, but less robust + to outliers. + robust_scale : Performs robust standardization that removes the influence + of outliers but does not put outliers and inliers on the same scale. + + Notes + ----- + NaNs are treated as missing values: disregarded in fit, and maintained in + transform. + + .. warning:: Risk of data leak + + Do not use :func:`~sklearn.preprocessing.quantile_transform` unless + you know what you are doing. A common mistake is to apply it + to the entire data *before* splitting into training and + test sets. This will bias the model evaluation because + information would have leaked from the test set to the + training set. + In general, we recommend using + :class:`~sklearn.preprocessing.QuantileTransformer` within a + :ref:`Pipeline ` in order to prevent most risks of data + leaking:`pipe = make_pipeline(QuantileTransformer(), + LogisticRegression())`. + + For a comparison of the different scalers, transformers, and normalizers, + see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. 
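# --- Editorial sketch (not part of the diffed file) ---------------------------
# Following the data-leak warning above, quantile scaling is best fitted inside
# a Pipeline so the quantiles are estimated on the training split only. A
# minimal sketch on a synthetic dataset (dataset and parameters are
# illustrative):
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import QuantileTransformer

X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

pipe = make_pipeline(QuantileTransformer(n_quantiles=50), LogisticRegression())
pipe.fit(X_train, y_train)                 # quantiles come from X_train only
print(pipe.score(X_test, y_test))
# ------------------------------------------------------------------------------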
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.preprocessing import quantile_transform + >>> rng = np.random.RandomState(0) + >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0) + >>> quantile_transform(X, n_quantiles=10, random_state=0, copy=True) + array([...]) + """ + n = QuantileTransformer( + n_quantiles=n_quantiles, + output_distribution=output_distribution, + subsample=subsample, + ignore_implicit_zeros=ignore_implicit_zeros, + random_state=random_state, + copy=copy, + ) + if axis == 0: + X = n.fit_transform(X) + else: # axis == 1 + X = n.fit_transform(X.T).T + return X + + +class PowerTransformer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): + """Apply a power transform featurewise to make data more Gaussian-like. + + Power transforms are a family of parametric, monotonic transformations + that are applied to make data more Gaussian-like. This is useful for + modeling issues related to heteroscedasticity (non-constant variance), + or other situations where normality is desired. + + Currently, PowerTransformer supports the Box-Cox transform and the + Yeo-Johnson transform. The optimal parameter for stabilizing variance and + minimizing skewness is estimated through maximum likelihood. + + Box-Cox requires input data to be strictly positive, while Yeo-Johnson + supports both positive or negative data. + + By default, zero-mean, unit-variance normalization is applied to the + transformed data. + + For an example visualization, refer to :ref:`Compare PowerTransformer with + other scalers `. To see the + effect of Box-Cox and Yeo-Johnson transformations on different + distributions, see: + :ref:`sphx_glr_auto_examples_preprocessing_plot_map_data_to_normal.py`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + Parameters + ---------- + method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson' + The power transform method. Available methods are: + + - 'yeo-johnson' [1]_, works with positive and negative values + - 'box-cox' [2]_, only works with strictly positive values + + standardize : bool, default=True + Set to True to apply zero-mean, unit-variance normalization to the + transformed output. + + copy : bool, default=True + Set to False to perform inplace computation during transformation. + + Attributes + ---------- + lambdas_ : ndarray of float of shape (n_features,) + The parameters of the power transformation for the selected features. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + power_transform : Equivalent function without the estimator API. + + QuantileTransformer : Maps data to a standard normal distribution with + the parameter `output_distribution='normal'`. + + Notes + ----- + NaNs are treated as missing values: disregarded in ``fit``, and maintained + in ``transform``. + + References + ---------- + + .. [1] :doi:`I.K. Yeo and R.A. Johnson, "A new family of power + transformations to improve normality or symmetry." Biometrika, + 87(4), pp.954-959, (2000). <10.1093/biomet/87.4.954>` + + .. [2] :doi:`G.E.P. Box and D.R. Cox, "An Analysis of Transformations", + Journal of the Royal Statistical Society B, 26, 211-252 (1964). 
+ <10.1111/j.2517-6161.1964.tb00553.x>` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.preprocessing import PowerTransformer + >>> pt = PowerTransformer() + >>> data = [[1, 2], [3, 2], [4, 5]] + >>> print(pt.fit(data)) + PowerTransformer() + >>> print(pt.lambdas_) + [ 1.386... -3.100...] + >>> print(pt.transform(data)) + [[-1.316... -0.707...] + [ 0.209... -0.707...] + [ 1.106... 1.414...]] + """ + + _parameter_constraints: dict = { + "method": [StrOptions({"yeo-johnson", "box-cox"})], + "standardize": ["boolean"], + "copy": ["boolean"], + } + + def __init__(self, method="yeo-johnson", *, standardize=True, copy=True): + self.method = method + self.standardize = standardize + self.copy = copy + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Estimate the optimal parameter lambda for each feature. + + The optimal lambda parameter for minimizing skewness is estimated on + each feature independently using maximum likelihood. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data used to estimate the optimal transformation parameters. + + y : None + Ignored. + + Returns + ------- + self : object + Fitted transformer. + """ + self._fit(X, y=y, force_transform=False) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Fit `PowerTransformer` to `X`, then transform `X`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data used to estimate the optimal transformation parameters + and to be transformed using a power transformation. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_features) + Transformed data. + """ + return self._fit(X, y, force_transform=True) + + def _fit(self, X, y=None, force_transform=False): + X = self._check_input(X, in_fit=True, check_positive=True) + + if not self.copy and not force_transform: # if call from fit() + X = X.copy() # force copy so that fit does not change X inplace + + n_samples = X.shape[0] + mean = np.mean(X, axis=0, dtype=np.float64) + var = np.var(X, axis=0, dtype=np.float64) + + optim_function = { + "box-cox": self._box_cox_optimize, + "yeo-johnson": self._yeo_johnson_optimize, + }[self.method] + + transform_function = { + "box-cox": boxcox, + "yeo-johnson": self._yeo_johnson_transform, + }[self.method] + + with np.errstate(invalid="ignore"): # hide NaN warnings + self.lambdas_ = np.empty(X.shape[1], dtype=X.dtype) + for i, col in enumerate(X.T): + # For yeo-johnson, leave constant features unchanged + # lambda=1 corresponds to the identity transformation + is_constant_feature = _is_constant_feature(var[i], mean[i], n_samples) + if self.method == "yeo-johnson" and is_constant_feature: + self.lambdas_[i] = 1.0 + continue + + self.lambdas_[i] = optim_function(col) + + if self.standardize or force_transform: + X[:, i] = transform_function(X[:, i], self.lambdas_[i]) + + if self.standardize: + self._scaler = StandardScaler(copy=False).set_output(transform="default") + if force_transform: + X = self._scaler.fit_transform(X) + else: + self._scaler.fit(X) + + return X + + def transform(self, X): + """Apply the power transform to each feature using the fitted lambdas. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to be transformed using a power transformation. + + Returns + ------- + X_trans : ndarray of shape (n_samples, n_features) + The transformed data. 
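# --- Editorial sketch (not part of the diffed file) ---------------------------
# A small usage sketch of the estimator defined above: fit one lambda per
# feature, transform, and invert. Data and names are illustrative; Box-Cox is
# chosen because the sample data is strictly positive.
import numpy as np
from sklearn.preprocessing import PowerTransformer

rng = np.random.RandomState(0)
X = rng.lognormal(size=(100, 2))                    # strictly positive, skewed

pt = PowerTransformer(method="box-cox")
X_t = pt.fit_transform(X)                            # power transform, then standardized
print(pt.lambdas_)                                   # one fitted lambda per feature
print(np.allclose(pt.inverse_transform(X_t), X))     # True up to numerical precision
# ------------------------------------------------------------------------------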
+ """ + check_is_fitted(self) + X = self._check_input(X, in_fit=False, check_positive=True, check_shape=True) + + transform_function = { + "box-cox": boxcox, + "yeo-johnson": self._yeo_johnson_transform, + }[self.method] + for i, lmbda in enumerate(self.lambdas_): + with np.errstate(invalid="ignore"): # hide NaN warnings + X[:, i] = transform_function(X[:, i], lmbda) + + if self.standardize: + X = self._scaler.transform(X) + + return X + + def inverse_transform(self, X): + """Apply the inverse power transformation using the fitted lambdas. + + The inverse of the Box-Cox transformation is given by:: + + if lambda_ == 0: + X = exp(X_trans) + else: + X = (X_trans * lambda_ + 1) ** (1 / lambda_) + + The inverse of the Yeo-Johnson transformation is given by:: + + if X >= 0 and lambda_ == 0: + X = exp(X_trans) - 1 + elif X >= 0 and lambda_ != 0: + X = (X_trans * lambda_ + 1) ** (1 / lambda_) - 1 + elif X < 0 and lambda_ != 2: + X = 1 - (-(2 - lambda_) * X_trans + 1) ** (1 / (2 - lambda_)) + elif X < 0 and lambda_ == 2: + X = 1 - exp(-X_trans) + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The transformed data. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The original data. + """ + check_is_fitted(self) + X = self._check_input(X, in_fit=False, check_shape=True) + + if self.standardize: + X = self._scaler.inverse_transform(X) + + inv_fun = { + "box-cox": self._box_cox_inverse_tranform, + "yeo-johnson": self._yeo_johnson_inverse_transform, + }[self.method] + for i, lmbda in enumerate(self.lambdas_): + with np.errstate(invalid="ignore"): # hide NaN warnings + X[:, i] = inv_fun(X[:, i], lmbda) + + return X + + def _box_cox_inverse_tranform(self, x, lmbda): + """Return inverse-transformed input x following Box-Cox inverse + transform with parameter lambda. + """ + if lmbda == 0: + x_inv = np.exp(x) + else: + x_inv = (x * lmbda + 1) ** (1 / lmbda) + + return x_inv + + def _yeo_johnson_inverse_transform(self, x, lmbda): + """Return inverse-transformed input x following Yeo-Johnson inverse + transform with parameter lambda. + """ + x_inv = np.zeros_like(x) + pos = x >= 0 + + # when x >= 0 + if abs(lmbda) < np.spacing(1.0): + x_inv[pos] = np.exp(x[pos]) - 1 + else: # lmbda != 0 + x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1 + + # when x < 0 + if abs(lmbda - 2) > np.spacing(1.0): + x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1, 1 / (2 - lmbda)) + else: # lmbda == 2 + x_inv[~pos] = 1 - np.exp(-x[~pos]) + + return x_inv + + def _yeo_johnson_transform(self, x, lmbda): + """Return transformed input x following Yeo-Johnson transform with + parameter lambda. + """ + + out = np.zeros_like(x) + pos = x >= 0 # binary mask + + # when x >= 0 + if abs(lmbda) < np.spacing(1.0): + out[pos] = np.log1p(x[pos]) + else: # lmbda != 0 + out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda + + # when x < 0 + if abs(lmbda - 2) > np.spacing(1.0): + out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda) + else: # lmbda == 2 + out[~pos] = -np.log1p(-x[~pos]) + + return out + + def _box_cox_optimize(self, x): + """Find and return optimal lambda parameter of the Box-Cox transform by + MLE, for observed data x. + + We here use scipy builtins which uses the brent optimizer. 
+ """ + mask = np.isnan(x) + if np.all(mask): + raise ValueError("Column must not be all nan.") + + # the computation of lambda is influenced by NaNs so we need to + # get rid of them + _, lmbda = stats.boxcox(x[~mask], lmbda=None) + + return lmbda + + def _yeo_johnson_optimize(self, x): + """Find and return optimal lambda parameter of the Yeo-Johnson + transform by MLE, for observed data x. + + Like for Box-Cox, MLE is done via the brent optimizer. + """ + x_tiny = np.finfo(np.float64).tiny + + def _neg_log_likelihood(lmbda): + """Return the negative log likelihood of the observed data x as a + function of lambda.""" + x_trans = self._yeo_johnson_transform(x, lmbda) + n_samples = x.shape[0] + x_trans_var = x_trans.var() + + # Reject transformed data that would raise a RuntimeWarning in np.log + if x_trans_var < x_tiny: + return np.inf + + log_var = np.log(x_trans_var) + loglike = -n_samples / 2 * log_var + loglike += (lmbda - 1) * (np.sign(x) * np.log1p(np.abs(x))).sum() + + return -loglike + + # the computation of lambda is influenced by NaNs so we need to + # get rid of them + x = x[~np.isnan(x)] + # choosing bracket -2, 2 like for boxcox + return optimize.brent(_neg_log_likelihood, brack=(-2, 2)) + + def _check_input(self, X, in_fit, check_positive=False, check_shape=False): + """Validate the input before fit and transform. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + in_fit : bool + Whether or not `_check_input` is called from `fit` or other + methods, e.g. `predict`, `transform`, etc. + + check_positive : bool, default=False + If True, check that all data is positive and non-zero (only if + ``self.method=='box-cox'``). + + check_shape : bool, default=False + If True, check that n_features matches the length of self.lambdas_ + """ + X = self._validate_data( + X, + ensure_2d=True, + dtype=FLOAT_DTYPES, + copy=self.copy, + force_all_finite="allow-nan", + reset=in_fit, + ) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered") + if check_positive and self.method == "box-cox" and np.nanmin(X) <= 0: + raise ValueError( + "The Box-Cox transformation can only be " + "applied to strictly positive data" + ) + + if check_shape and not X.shape[1] == len(self.lambdas_): + raise ValueError( + "Input data has a different number of features " + "than fitting data. Should have {n}, data has {m}".format( + n=len(self.lambdas_), m=X.shape[1] + ) + ) + + return X + + def _more_tags(self): + return {"allow_nan": True} + + +@validate_params( + {"X": ["array-like"]}, + prefer_skip_nested_validation=False, +) +def power_transform(X, method="yeo-johnson", *, standardize=True, copy=True): + """Parametric, monotonic transformation to make data more Gaussian-like. + + Power transforms are a family of parametric, monotonic transformations + that are applied to make data more Gaussian-like. This is useful for + modeling issues related to heteroscedasticity (non-constant variance), + or other situations where normality is desired. + + Currently, power_transform supports the Box-Cox transform and the + Yeo-Johnson transform. The optimal parameter for stabilizing variance and + minimizing skewness is estimated through maximum likelihood. + + Box-Cox requires input data to be strictly positive, while Yeo-Johnson + supports both positive or negative data. + + By default, zero-mean, unit-variance normalization is applied to the + transformed data. + + Read more in the :ref:`User Guide `. 
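# --- Editorial sketch (not part of the diffed file) ---------------------------
# Illustrative check of the piecewise Yeo-Johnson formula shown in
# `_yeo_johnson_transform` above, written as a standalone function and compared
# against the estimator with `standardize=False` (so no extra scaling is
# applied). This is a sketch to make the formula concrete, not an alternative
# implementation for real use.
import numpy as np
from sklearn.preprocessing import PowerTransformer

def yeo_johnson(x, lmbda):
    out = np.zeros_like(x, dtype=float)
    pos = x >= 0
    if abs(lmbda) < np.spacing(1.0):                       # lambda ~ 0
        out[pos] = np.log1p(x[pos])
    else:
        out[pos] = ((x[pos] + 1) ** lmbda - 1) / lmbda
    if abs(lmbda - 2) > np.spacing(1.0):                   # lambda != 2
        out[~pos] = -((-x[~pos] + 1) ** (2 - lmbda) - 1) / (2 - lmbda)
    else:
        out[~pos] = -np.log1p(-x[~pos])
    return out

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 1)) ** 3                         # mixed positive/negative values
pt = PowerTransformer(method="yeo-johnson", standardize=False)
X_t = pt.fit_transform(X)
print(np.allclose(X_t, yeo_johnson(X[:, 0], pt.lambdas_[0]).reshape(-1, 1)))  # True
# ------------------------------------------------------------------------------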
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to be transformed using a power transformation. + + method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson' + The power transform method. Available methods are: + + - 'yeo-johnson' [1]_, works with positive and negative values + - 'box-cox' [2]_, only works with strictly positive values + + .. versionchanged:: 0.23 + The default value of the `method` parameter changed from + 'box-cox' to 'yeo-johnson' in 0.23. + + standardize : bool, default=True + Set to True to apply zero-mean, unit-variance normalization to the + transformed output. + + copy : bool, default=True + If False, try to avoid a copy and transform in place. + This is not guaranteed to always work in place; e.g. if the data is + a numpy array with an int dtype, a copy will be returned even with + copy=False. + + Returns + ------- + X_trans : ndarray of shape (n_samples, n_features) + The transformed data. + + See Also + -------- + PowerTransformer : Equivalent transformation with the + Transformer API (e.g. as part of a preprocessing + :class:`~sklearn.pipeline.Pipeline`). + + quantile_transform : Maps data to a standard normal distribution with + the parameter `output_distribution='normal'`. + + Notes + ----- + NaNs are treated as missing values: disregarded in ``fit``, and maintained + in ``transform``. + + For a comparison of the different scalers, transformers, and normalizers, + see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. + + References + ---------- + + .. [1] I.K. Yeo and R.A. Johnson, "A new family of power transformations to + improve normality or symmetry." Biometrika, 87(4), pp.954-959, + (2000). + + .. [2] G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal + of the Royal Statistical Society B, 26, 211-252 (1964). + + Examples + -------- + >>> import numpy as np + >>> from sklearn.preprocessing import power_transform + >>> data = [[1, 2], [3, 2], [4, 5]] + >>> print(power_transform(data, method='box-cox')) + [[-1.332... -0.707...] + [ 0.256... -0.707...] + [ 1.076... 1.414...]] + + .. warning:: Risk of data leak. + Do not use :func:`~sklearn.preprocessing.power_transform` unless you + know what you are doing. A common mistake is to apply it to the entire + data *before* splitting into training and test sets. This will bias the + model evaluation because information would have leaked from the test + set to the training set. + In general, we recommend using + :class:`~sklearn.preprocessing.PowerTransformer` within a + :ref:`Pipeline ` in order to prevent most risks of data + leaking, e.g.: `pipe = make_pipeline(PowerTransformer(), + LogisticRegression())`. 
+ """ + pt = PowerTransformer(method=method, standardize=standardize, copy=copy) + return pt.fit_transform(X) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_discretization.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_discretization.py new file mode 100644 index 0000000000000000000000000000000000000000..033bdd960d2b215bde276e17637858b62e98ffa0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_discretization.py @@ -0,0 +1,472 @@ +# Author: Henry Lin +# Tom Dupré la Tour + +# License: BSD + + +import warnings +from numbers import Integral + +import numpy as np + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils import _safe_indexing +from ..utils._param_validation import Hidden, Interval, Options, StrOptions +from ..utils.stats import _weighted_percentile +from ..utils.validation import ( + _check_feature_names_in, + _check_sample_weight, + check_array, + check_is_fitted, + check_random_state, +) +from ._encoders import OneHotEncoder + + +class KBinsDiscretizer(TransformerMixin, BaseEstimator): + """ + Bin continuous data into intervals. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + Parameters + ---------- + n_bins : int or array-like of shape (n_features,), default=5 + The number of bins to produce. Raises ValueError if ``n_bins < 2``. + + encode : {'onehot', 'onehot-dense', 'ordinal'}, default='onehot' + Method used to encode the transformed result. + + - 'onehot': Encode the transformed result with one-hot encoding + and return a sparse matrix. Ignored features are always + stacked to the right. + - 'onehot-dense': Encode the transformed result with one-hot encoding + and return a dense array. Ignored features are always + stacked to the right. + - 'ordinal': Return the bin identifier encoded as an integer value. + + strategy : {'uniform', 'quantile', 'kmeans'}, default='quantile' + Strategy used to define the widths of the bins. + + - 'uniform': All bins in each feature have identical widths. + - 'quantile': All bins in each feature have the same number of points. + - 'kmeans': Values in each bin have the same nearest center of a 1D + k-means cluster. + + For an example of the different strategies see: + :ref:`sphx_glr_auto_examples_preprocessing_plot_discretization_strategies.py`. + + dtype : {np.float32, np.float64}, default=None + The desired data-type for the output. If None, output dtype is + consistent with input dtype. Only np.float32 and np.float64 are + supported. + + .. versionadded:: 0.24 + + subsample : int or None, default='warn' + Maximum number of samples, used to fit the model, for computational + efficiency. Defaults to 200_000 when `strategy='quantile'` and to `None` + when `strategy='uniform'` or `strategy='kmeans'`. + `subsample=None` means that all the training samples are used when + computing the quantiles that determine the binning thresholds. + Since quantile computation relies on sorting each column of `X` and + that sorting has an `n log(n)` time complexity, + it is recommended to use subsampling on datasets with a + very large number of samples. + + .. versionchanged:: 1.3 + The default value of `subsample` changed from `None` to `200_000` when + `strategy="quantile"`. + + .. versionchanged:: 1.5 + The default value of `subsample` changed from `None` to `200_000` when + `strategy="uniform"` or `strategy="kmeans"`. 
+ + random_state : int, RandomState instance or None, default=None + Determines random number generation for subsampling. + Pass an int for reproducible results across multiple function calls. + See the `subsample` parameter for more details. + See :term:`Glossary `. + + .. versionadded:: 1.1 + + Attributes + ---------- + bin_edges_ : ndarray of ndarray of shape (n_features,) + The edges of each bin. Contain arrays of varying shapes ``(n_bins_, )`` + Ignored features will have empty arrays. + + n_bins_ : ndarray of shape (n_features,), dtype=np.int64 + Number of bins per feature. Bins whose width are too small + (i.e., <= 1e-8) are removed with a warning. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + Binarizer : Class used to bin values as ``0`` or + ``1`` based on a parameter ``threshold``. + + Notes + ----- + + For a visualization of discretization on different datasets refer to + :ref:`sphx_glr_auto_examples_preprocessing_plot_discretization_classification.py`. + On the effect of discretization on linear models see: + :ref:`sphx_glr_auto_examples_preprocessing_plot_discretization.py`. + + In bin edges for feature ``i``, the first and last values are used only for + ``inverse_transform``. During transform, bin edges are extended to:: + + np.concatenate([-np.inf, bin_edges_[i][1:-1], np.inf]) + + You can combine ``KBinsDiscretizer`` with + :class:`~sklearn.compose.ColumnTransformer` if you only want to preprocess + part of the features. + + ``KBinsDiscretizer`` might produce constant features (e.g., when + ``encode = 'onehot'`` and certain bins do not contain any data). + These features can be removed with feature selection algorithms + (e.g., :class:`~sklearn.feature_selection.VarianceThreshold`). + + Examples + -------- + >>> from sklearn.preprocessing import KBinsDiscretizer + >>> X = [[-2, 1, -4, -1], + ... [-1, 2, -3, -0.5], + ... [ 0, 3, -2, 0.5], + ... [ 1, 4, -1, 2]] + >>> est = KBinsDiscretizer( + ... n_bins=3, encode='ordinal', strategy='uniform', subsample=None + ... ) + >>> est.fit(X) + KBinsDiscretizer(...) + >>> Xt = est.transform(X) + >>> Xt # doctest: +SKIP + array([[ 0., 0., 0., 0.], + [ 1., 1., 1., 0.], + [ 2., 2., 2., 1.], + [ 2., 2., 2., 2.]]) + + Sometimes it may be useful to convert the data back into the original + feature space. The ``inverse_transform`` function converts the binned + data into the original feature space. Each value will be equal to the mean + of the two bin edges. 
+ + >>> est.bin_edges_[0] + array([-2., -1., 0., 1.]) + >>> est.inverse_transform(Xt) + array([[-1.5, 1.5, -3.5, -0.5], + [-0.5, 2.5, -2.5, -0.5], + [ 0.5, 3.5, -1.5, 0.5], + [ 0.5, 3.5, -1.5, 1.5]]) + """ + + _parameter_constraints: dict = { + "n_bins": [Interval(Integral, 2, None, closed="left"), "array-like"], + "encode": [StrOptions({"onehot", "onehot-dense", "ordinal"})], + "strategy": [StrOptions({"uniform", "quantile", "kmeans"})], + "dtype": [Options(type, {np.float64, np.float32}), None], + "subsample": [ + Interval(Integral, 1, None, closed="left"), + None, + Hidden(StrOptions({"warn"})), + ], + "random_state": ["random_state"], + } + + def __init__( + self, + n_bins=5, + *, + encode="onehot", + strategy="quantile", + dtype=None, + subsample="warn", + random_state=None, + ): + self.n_bins = n_bins + self.encode = encode + self.strategy = strategy + self.dtype = dtype + self.subsample = subsample + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None, sample_weight=None): + """ + Fit the estimator. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data to be discretized. + + y : None + Ignored. This parameter exists only for compatibility with + :class:`~sklearn.pipeline.Pipeline`. + + sample_weight : ndarray of shape (n_samples,) + Contains weight values to be associated with each sample. + Only possible when `strategy` is set to `"quantile"`. + + .. versionadded:: 1.3 + + Returns + ------- + self : object + Returns the instance itself. + """ + X = self._validate_data(X, dtype="numeric") + + if self.dtype in (np.float64, np.float32): + output_dtype = self.dtype + else: # self.dtype is None + output_dtype = X.dtype + + n_samples, n_features = X.shape + + if sample_weight is not None and self.strategy == "uniform": + raise ValueError( + "`sample_weight` was provided but it cannot be " + "used with strategy='uniform'. Got strategy=" + f"{self.strategy!r} instead." + ) + + if self.strategy in ("uniform", "kmeans") and self.subsample == "warn": + warnings.warn( + ( + "In version 1.5 onwards, subsample=200_000 " + "will be used by default. Set subsample explicitly to " + "silence this warning in the mean time. Set " + "subsample=None to disable subsampling explicitly." + ), + FutureWarning, + ) + + subsample = self.subsample + if subsample == "warn": + subsample = 200000 if self.strategy == "quantile" else None + if subsample is not None and n_samples > subsample: + rng = check_random_state(self.random_state) + subsample_idx = rng.choice(n_samples, size=subsample, replace=False) + X = _safe_indexing(X, subsample_idx) + + n_features = X.shape[1] + n_bins = self._validate_n_bins(n_features) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + bin_edges = np.zeros(n_features, dtype=object) + for jj in range(n_features): + column = X[:, jj] + col_min, col_max = column.min(), column.max() + + if col_min == col_max: + warnings.warn( + "Feature %d is constant and will be replaced with 0." 
% jj + ) + n_bins[jj] = 1 + bin_edges[jj] = np.array([-np.inf, np.inf]) + continue + + if self.strategy == "uniform": + bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1) + + elif self.strategy == "quantile": + quantiles = np.linspace(0, 100, n_bins[jj] + 1) + if sample_weight is None: + bin_edges[jj] = np.asarray(np.percentile(column, quantiles)) + else: + bin_edges[jj] = np.asarray( + [ + _weighted_percentile(column, sample_weight, q) + for q in quantiles + ], + dtype=np.float64, + ) + elif self.strategy == "kmeans": + from ..cluster import KMeans # fixes import loops + + # Deterministic initialization with uniform spacing + uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1) + init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5 + + # 1D k-means procedure + km = KMeans(n_clusters=n_bins[jj], init=init, n_init=1) + centers = km.fit( + column[:, None], sample_weight=sample_weight + ).cluster_centers_[:, 0] + # Must sort, centers may be unsorted even with sorted init + centers.sort() + bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5 + bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max] + + # Remove bins whose width are too small (i.e., <= 1e-8) + if self.strategy in ("quantile", "kmeans"): + mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8 + bin_edges[jj] = bin_edges[jj][mask] + if len(bin_edges[jj]) - 1 != n_bins[jj]: + warnings.warn( + "Bins whose width are too small (i.e., <= " + "1e-8) in feature %d are removed. Consider " + "decreasing the number of bins." % jj + ) + n_bins[jj] = len(bin_edges[jj]) - 1 + + self.bin_edges_ = bin_edges + self.n_bins_ = n_bins + + if "onehot" in self.encode: + self._encoder = OneHotEncoder( + categories=[np.arange(i) for i in self.n_bins_], + sparse_output=self.encode == "onehot", + dtype=output_dtype, + ) + # Fit the OneHotEncoder with toy datasets + # so that it's ready for use after the KBinsDiscretizer is fitted + self._encoder.fit(np.zeros((1, len(self.n_bins_)))) + + return self + + def _validate_n_bins(self, n_features): + """Returns n_bins_, the number of bins per feature.""" + orig_bins = self.n_bins + if isinstance(orig_bins, Integral): + return np.full(n_features, orig_bins, dtype=int) + + n_bins = check_array(orig_bins, dtype=int, copy=True, ensure_2d=False) + + if n_bins.ndim > 1 or n_bins.shape[0] != n_features: + raise ValueError("n_bins must be a scalar or array of shape (n_features,).") + + bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins) + + violating_indices = np.where(bad_nbins_value)[0] + if violating_indices.shape[0] > 0: + indices = ", ".join(str(i) for i in violating_indices) + raise ValueError( + "{} received an invalid number " + "of bins at indices {}. Number of bins " + "must be at least 2, and must be an int.".format( + KBinsDiscretizer.__name__, indices + ) + ) + return n_bins + + def transform(self, X): + """ + Discretize the data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data to be discretized. + + Returns + ------- + Xt : {ndarray, sparse matrix}, dtype={np.float32, np.float64} + Data in the binned space. Will be a sparse matrix if + `self.encode='onehot'` and ndarray otherwise. 
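# --- Editorial sketch (not part of the diffed file) ---------------------------
# Simplified view of how the bin edges above are computed for a single feature
# (ignoring subsampling, sample weights and tiny-bin removal): 'uniform'
# spaces edges evenly between min and max, 'quantile' places them at evenly
# spaced percentiles so each bin holds roughly the same number of points.
import numpy as np

column = np.array([0.0, 0.1, 0.2, 0.3, 1.0, 5.0, 9.0, 10.0])
n_bins = 4

uniform_edges = np.linspace(column.min(), column.max(), n_bins + 1)
quantile_edges = np.percentile(column, np.linspace(0, 100, n_bins + 1))

print(uniform_edges)     # [ 0.   2.5  5.   7.5 10. ]
print(quantile_edges)    # percentile-based edges, equal counts per bin
# ------------------------------------------------------------------------------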
+ """ + check_is_fitted(self) + + # check input and attribute dtypes + dtype = (np.float64, np.float32) if self.dtype is None else self.dtype + Xt = self._validate_data(X, copy=True, dtype=dtype, reset=False) + + bin_edges = self.bin_edges_ + for jj in range(Xt.shape[1]): + Xt[:, jj] = np.searchsorted(bin_edges[jj][1:-1], Xt[:, jj], side="right") + + if self.encode == "ordinal": + return Xt + + dtype_init = None + if "onehot" in self.encode: + dtype_init = self._encoder.dtype + self._encoder.dtype = Xt.dtype + try: + Xt_enc = self._encoder.transform(Xt) + finally: + # revert the initial dtype to avoid modifying self. + self._encoder.dtype = dtype_init + return Xt_enc + + def inverse_transform(self, Xt): + """ + Transform discretized data back to original feature space. + + Note that this function does not regenerate the original data + due to discretization rounding. + + Parameters + ---------- + Xt : array-like of shape (n_samples, n_features) + Transformed data in the binned space. + + Returns + ------- + Xinv : ndarray, dtype={np.float32, np.float64} + Data in the original feature space. + """ + check_is_fitted(self) + + if "onehot" in self.encode: + Xt = self._encoder.inverse_transform(Xt) + + Xinv = check_array(Xt, copy=True, dtype=(np.float64, np.float32)) + n_features = self.n_bins_.shape[0] + if Xinv.shape[1] != n_features: + raise ValueError( + "Incorrect number of features. Expecting {}, received {}.".format( + n_features, Xinv.shape[1] + ) + ) + + for jj in range(n_features): + bin_edges = self.bin_edges_[jj] + bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5 + Xinv[:, jj] = bin_centers[(Xinv[:, jj]).astype(np.int64)] + + return Xinv + + def get_feature_names_out(self, input_features=None): + """Get output feature names. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. 
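# --- Editorial sketch (not part of the diffed file) ---------------------------
# Sketch of the encode/decode step shown above: `transform` assigns each value
# the index of its bin via `np.searchsorted` on the interior edges, and
# `inverse_transform` replaces each bin index with the midpoint of its two
# edges. The one-hot path and dtype handling are left to the estimator.
import numpy as np

bin_edges = np.array([0.0, 2.5, 5.0, 7.5, 10.0])    # edges for one feature
values = np.array([0.3, 2.5, 6.0, 9.9])

codes = np.searchsorted(bin_edges[1:-1], values, side="right")   # ordinal bins
centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5
decoded = centers[codes]

print(codes)      # [0 1 2 3]
print(decoded)    # [1.25 3.75 6.25 8.75]
# ------------------------------------------------------------------------------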
+ """ + check_is_fitted(self, "n_features_in_") + input_features = _check_feature_names_in(self, input_features) + if hasattr(self, "_encoder"): + return self._encoder.get_feature_names_out(input_features) + + # ordinal encoding + return input_features diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_encoders.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_encoders.py new file mode 100644 index 0000000000000000000000000000000000000000..3feadc68e8d2dc3d7829363a9ba9f2db1b6d6910 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_encoders.py @@ -0,0 +1,1678 @@ +# Authors: Andreas Mueller +# Joris Van den Bossche +# License: BSD 3 clause + +import numbers +import warnings +from numbers import Integral + +import numpy as np +from scipy import sparse + +from ..base import BaseEstimator, OneToOneFeatureMixin, TransformerMixin, _fit_context +from ..utils import _safe_indexing, check_array, is_scalar_nan +from ..utils._encode import _check_unknown, _encode, _get_counts, _unique +from ..utils._mask import _get_mask +from ..utils._param_validation import Interval, RealNotInt, StrOptions +from ..utils._set_output import _get_output_config +from ..utils.validation import _check_feature_names_in, check_is_fitted + +__all__ = ["OneHotEncoder", "OrdinalEncoder"] + + +class _BaseEncoder(TransformerMixin, BaseEstimator): + """ + Base class for encoders that includes the code to categorize and + transform the input features. + + """ + + def _check_X(self, X, force_all_finite=True): + """ + Perform custom check_array: + - convert list of strings to object dtype + - check for missing values for object dtype data (check_array does + not do that) + - return list of features (arrays): this list of features is + constructed feature by feature to preserve the data types + of pandas DataFrame columns, as otherwise information is lost + and cannot be used, e.g. for the `categories_` attribute. + + """ + if not (hasattr(X, "iloc") and getattr(X, "ndim", 0) == 2): + # if not a dataframe, do normal check_array validation + X_temp = check_array(X, dtype=None, force_all_finite=force_all_finite) + if not hasattr(X, "dtype") and np.issubdtype(X_temp.dtype, np.str_): + X = check_array(X, dtype=object, force_all_finite=force_all_finite) + else: + X = X_temp + needs_validation = False + else: + # pandas dataframe, do validation later column by column, in order + # to keep the dtype information to be used in the encoder. + needs_validation = force_all_finite + + n_samples, n_features = X.shape + X_columns = [] + + for i in range(n_features): + Xi = _safe_indexing(X, indices=i, axis=1) + Xi = check_array( + Xi, ensure_2d=False, dtype=None, force_all_finite=needs_validation + ) + X_columns.append(Xi) + + return X_columns, n_samples, n_features + + def _fit( + self, + X, + handle_unknown="error", + force_all_finite=True, + return_counts=False, + return_and_ignore_missing_for_infrequent=False, + ): + self._check_infrequent_enabled() + self._check_n_features(X, reset=True) + self._check_feature_names(X, reset=True) + X_list, n_samples, n_features = self._check_X( + X, force_all_finite=force_all_finite + ) + self.n_features_in_ = n_features + + if self.categories != "auto": + if len(self.categories) != n_features: + raise ValueError( + "Shape mismatch: if categories is an array," + " it has to be of shape (n_features,)." 
+ ) + + self.categories_ = [] + category_counts = [] + compute_counts = return_counts or self._infrequent_enabled + + for i in range(n_features): + Xi = X_list[i] + + if self.categories == "auto": + result = _unique(Xi, return_counts=compute_counts) + if compute_counts: + cats, counts = result + category_counts.append(counts) + else: + cats = result + else: + if np.issubdtype(Xi.dtype, np.str_): + # Always convert string categories to objects to avoid + # unexpected string truncation for longer category labels + # passed in the constructor. + Xi_dtype = object + else: + Xi_dtype = Xi.dtype + + cats = np.array(self.categories[i], dtype=Xi_dtype) + if ( + cats.dtype == object + and isinstance(cats[0], bytes) + and Xi.dtype.kind != "S" + ): + msg = ( + f"In column {i}, the predefined categories have type 'bytes'" + " which is incompatible with values of type" + f" '{type(Xi[0]).__name__}'." + ) + raise ValueError(msg) + + # `nan` must be the last stated category + for category in cats[:-1]: + if is_scalar_nan(category): + raise ValueError( + "Nan should be the last element in user" + f" provided categories, see categories {cats}" + f" in column #{i}" + ) + + if cats.size != len(_unique(cats)): + msg = ( + f"In column {i}, the predefined categories" + " contain duplicate elements." + ) + raise ValueError(msg) + + if Xi.dtype.kind not in "OUS": + sorted_cats = np.sort(cats) + error_msg = ( + "Unsorted categories are not supported for numerical categories" + ) + # if there are nans, nan should be the last element + stop_idx = -1 if np.isnan(sorted_cats[-1]) else None + if np.any(sorted_cats[:stop_idx] != cats[:stop_idx]): + raise ValueError(error_msg) + + if handle_unknown == "error": + diff = _check_unknown(Xi, cats) + if diff: + msg = ( + "Found unknown categories {0} in column {1}" + " during fit".format(diff, i) + ) + raise ValueError(msg) + if compute_counts: + category_counts.append(_get_counts(Xi, cats)) + + self.categories_.append(cats) + + output = {"n_samples": n_samples} + if return_counts: + output["category_counts"] = category_counts + + missing_indices = {} + if return_and_ignore_missing_for_infrequent: + for feature_idx, categories_for_idx in enumerate(self.categories_): + if is_scalar_nan(categories_for_idx[-1]): + # `nan` values can only be placed in the latest position + missing_indices[feature_idx] = categories_for_idx.size - 1 + output["missing_indices"] = missing_indices + + if self._infrequent_enabled: + self._fit_infrequent_category_mapping( + n_samples, + category_counts, + missing_indices, + ) + return output + + def _transform( + self, + X, + handle_unknown="error", + force_all_finite=True, + warn_on_unknown=False, + ignore_category_indices=None, + ): + X_list, n_samples, n_features = self._check_X( + X, force_all_finite=force_all_finite + ) + self._check_feature_names(X, reset=False) + self._check_n_features(X, reset=False) + + X_int = np.zeros((n_samples, n_features), dtype=int) + X_mask = np.ones((n_samples, n_features), dtype=bool) + + columns_with_unknown = [] + for i in range(n_features): + Xi = X_list[i] + diff, valid_mask = _check_unknown(Xi, self.categories_[i], return_mask=True) + + if not np.all(valid_mask): + if handle_unknown == "error": + msg = ( + "Found unknown categories {0} in column {1}" + " during transform".format(diff, i) + ) + raise ValueError(msg) + else: + if warn_on_unknown: + columns_with_unknown.append(i) + # Set the problematic rows to an acceptable value and + # continue `The rows are marked `X_mask` and will be + # removed later. 
+ X_mask[:, i] = valid_mask + # cast Xi into the largest string type necessary + # to handle different lengths of numpy strings + if ( + self.categories_[i].dtype.kind in ("U", "S") + and self.categories_[i].itemsize > Xi.itemsize + ): + Xi = Xi.astype(self.categories_[i].dtype) + elif self.categories_[i].dtype.kind == "O" and Xi.dtype.kind == "U": + # categories are objects and Xi are numpy strings. + # Cast Xi to an object dtype to prevent truncation + # when setting invalid values. + Xi = Xi.astype("O") + else: + Xi = Xi.copy() + + Xi[~valid_mask] = self.categories_[i][0] + # We use check_unknown=False, since _check_unknown was + # already called above. + X_int[:, i] = _encode(Xi, uniques=self.categories_[i], check_unknown=False) + if columns_with_unknown: + warnings.warn( + ( + "Found unknown categories in columns " + f"{columns_with_unknown} during transform. These " + "unknown categories will be encoded as all zeros" + ), + UserWarning, + ) + + self._map_infrequent_categories(X_int, X_mask, ignore_category_indices) + return X_int, X_mask + + @property + def infrequent_categories_(self): + """Infrequent categories for each feature.""" + # raises an AttributeError if `_infrequent_indices` is not defined + infrequent_indices = self._infrequent_indices + return [ + None if indices is None else category[indices] + for category, indices in zip(self.categories_, infrequent_indices) + ] + + def _check_infrequent_enabled(self): + """ + This functions checks whether _infrequent_enabled is True or False. + This has to be called after parameter validation in the fit function. + """ + max_categories = getattr(self, "max_categories", None) + min_frequency = getattr(self, "min_frequency", None) + self._infrequent_enabled = ( + max_categories is not None and max_categories >= 1 + ) or min_frequency is not None + + def _identify_infrequent(self, category_count, n_samples, col_idx): + """Compute the infrequent indices. + + Parameters + ---------- + category_count : ndarray of shape (n_cardinality,) + Category counts. + + n_samples : int + Number of samples. + + col_idx : int + Index of the current category. Only used for the error message. + + Returns + ------- + output : ndarray of shape (n_infrequent_categories,) or None + If there are infrequent categories, indices of infrequent + categories. Otherwise None. + """ + if isinstance(self.min_frequency, numbers.Integral): + infrequent_mask = category_count < self.min_frequency + elif isinstance(self.min_frequency, numbers.Real): + min_frequency_abs = n_samples * self.min_frequency + infrequent_mask = category_count < min_frequency_abs + else: + infrequent_mask = np.zeros(category_count.shape[0], dtype=bool) + + n_current_features = category_count.size - infrequent_mask.sum() + 1 + if self.max_categories is not None and self.max_categories < n_current_features: + # max_categories includes the one infrequent category + frequent_category_count = self.max_categories - 1 + if frequent_category_count == 0: + # All categories are infrequent + infrequent_mask[:] = True + else: + # stable sort to preserve original count order + smallest_levels = np.argsort(category_count, kind="mergesort")[ + :-frequent_category_count + ] + infrequent_mask[smallest_levels] = True + + output = np.flatnonzero(infrequent_mask) + return output if output.size > 0 else None + + def _fit_infrequent_category_mapping( + self, n_samples, category_counts, missing_indices + ): + """Fit infrequent categories. + + Defines the private attribute: `_default_to_infrequent_mappings`. 
For + feature `i`, `_default_to_infrequent_mappings[i]` defines the mapping + from the integer encoding returned by `super().transform()` into + infrequent categories. If `_default_to_infrequent_mappings[i]` is None, + there were no infrequent categories in the training set. + + For example if categories 0, 2 and 4 were frequent, while categories + 1, 3, 5 were infrequent for feature 7, then these categories are mapped + to a single output: + `_default_to_infrequent_mappings[7] = array([0, 3, 1, 3, 2, 3])` + + Defines private attribute: `_infrequent_indices`. `_infrequent_indices[i]` + is an array of indices such that + `categories_[i][_infrequent_indices[i]]` are all the infrequent category + labels. If the feature `i` has no infrequent categories + `_infrequent_indices[i]` is None. + + .. versionadded:: 1.1 + + Parameters + ---------- + n_samples : int + Number of samples in training set. + category_counts: list of ndarray + `category_counts[i]` is the category counts corresponding to + `self.categories_[i]`. + missing_indices : dict + Dict mapping from feature_idx to category index with a missing value. + """ + # Remove missing value from counts, so it is not considered as infrequent + if missing_indices: + category_counts_ = [] + for feature_idx, count in enumerate(category_counts): + if feature_idx in missing_indices: + category_counts_.append( + np.delete(count, missing_indices[feature_idx]) + ) + else: + category_counts_.append(count) + else: + category_counts_ = category_counts + + self._infrequent_indices = [ + self._identify_infrequent(category_count, n_samples, col_idx) + for col_idx, category_count in enumerate(category_counts_) + ] + + # compute mapping from default mapping to infrequent mapping + self._default_to_infrequent_mappings = [] + + for feature_idx, infreq_idx in enumerate(self._infrequent_indices): + cats = self.categories_[feature_idx] + # no infrequent categories + if infreq_idx is None: + self._default_to_infrequent_mappings.append(None) + continue + + n_cats = len(cats) + if feature_idx in missing_indices: + # Missing index was removed from this category when computing + # infrequent indices, thus we need to decrease the number of + # total categories when considering the infrequent mapping. + n_cats -= 1 + + # infrequent indices exist + mapping = np.empty(n_cats, dtype=np.int64) + n_infrequent_cats = infreq_idx.size + + # infrequent categories are mapped to the last element. + n_frequent_cats = n_cats - n_infrequent_cats + mapping[infreq_idx] = n_frequent_cats + + frequent_indices = np.setdiff1d(np.arange(n_cats), infreq_idx) + mapping[frequent_indices] = np.arange(n_frequent_cats) + + self._default_to_infrequent_mappings.append(mapping) + + def _map_infrequent_categories(self, X_int, X_mask, ignore_category_indices): + """Map infrequent categories to integer representing the infrequent category. + + This modifies X_int in-place. Values that were invalid based on `X_mask` + are mapped to the infrequent category if there was an infrequent + category for that feature. + + Parameters + ---------- + X_int: ndarray of shape (n_samples, n_features) + Integer encoded categories. + + X_mask: ndarray of shape (n_samples, n_features) + Bool mask for valid values in `X_int`. + + ignore_category_indices : dict + Dictionary mapping from feature_idx to category index to ignore. + Ignored indexes will not be grouped and the original ordinal encoding + will remain. 
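# --- Editorial sketch (not part of the diffed file) ---------------------------
# Sketch reproducing the mapping example from the docstring above: categories
# 0, 2, 4 are frequent and 1, 3, 5 are infrequent, so the default integer
# codes are remapped with the infrequent group collapsed onto the last output
# position, then applied to encoded values with `np.take`.
import numpy as np

n_cats = 6
infreq_idx = np.array([1, 3, 5])                   # indices of infrequent categories

mapping = np.empty(n_cats, dtype=np.int64)
n_frequent = n_cats - infreq_idx.size
mapping[infreq_idx] = n_frequent                   # every infrequent category -> last slot
frequent_idx = np.setdiff1d(np.arange(n_cats), infreq_idx)
mapping[frequent_idx] = np.arange(n_frequent)
print(mapping)                                     # [0 3 1 3 2 3]

default_codes = np.array([0, 1, 2, 3, 4, 5, 5])    # e.g. one feature's encodings
print(np.take(mapping, default_codes))             # [0 3 1 3 2 3 3]
# ------------------------------------------------------------------------------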
+ """ + if not self._infrequent_enabled: + return + + ignore_category_indices = ignore_category_indices or {} + + for col_idx in range(X_int.shape[1]): + infrequent_idx = self._infrequent_indices[col_idx] + if infrequent_idx is None: + continue + + X_int[~X_mask[:, col_idx], col_idx] = infrequent_idx[0] + if self.handle_unknown == "infrequent_if_exist": + # All the unknown values are now mapped to the + # infrequent_idx[0], which makes the unknown values valid + # This is needed in `transform` when the encoding is formed + # using `X_mask`. + X_mask[:, col_idx] = True + + # Remaps encoding in `X_int` where the infrequent categories are + # grouped together. + for i, mapping in enumerate(self._default_to_infrequent_mappings): + if mapping is None: + continue + + if i in ignore_category_indices: + # Update rows that are **not** ignored + rows_to_update = X_int[:, i] != ignore_category_indices[i] + else: + rows_to_update = slice(None) + + X_int[rows_to_update, i] = np.take(mapping, X_int[rows_to_update, i]) + + def _more_tags(self): + return {"X_types": ["2darray", "categorical"], "allow_nan": True} + + +class OneHotEncoder(_BaseEncoder): + """ + Encode categorical features as a one-hot numeric array. + + The input to this transformer should be an array-like of integers or + strings, denoting the values taken on by categorical (discrete) features. + The features are encoded using a one-hot (aka 'one-of-K' or 'dummy') + encoding scheme. This creates a binary column for each category and + returns a sparse matrix or dense array (depending on the ``sparse_output`` + parameter). + + By default, the encoder derives the categories based on the unique values + in each feature. Alternatively, you can also specify the `categories` + manually. + + This encoding is needed for feeding categorical data to many scikit-learn + estimators, notably linear models and SVMs with the standard kernels. + + Note: a one-hot encoding of y labels should use a LabelBinarizer + instead. + + Read more in the :ref:`User Guide `. + For a comparison of different encoders, refer to: + :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder.py`. + + Parameters + ---------- + categories : 'auto' or a list of array-like, default='auto' + Categories (unique values) per feature: + + - 'auto' : Determine categories automatically from the training data. + - list : ``categories[i]`` holds the categories expected in the ith + column. The passed categories should not mix strings and numeric + values within a single feature, and should be sorted in case of + numeric values. + + The used categories can be found in the ``categories_`` attribute. + + .. versionadded:: 0.20 + + drop : {'first', 'if_binary'} or an array-like of shape (n_features,), \ + default=None + Specifies a methodology to use to drop one of the categories per + feature. This is useful in situations where perfectly collinear + features cause problems, such as when feeding the resulting data + into an unregularized linear regression model. + + However, dropping one category breaks the symmetry of the original + representation and can therefore induce a bias in downstream models, + for instance for penalized linear classification or regression models. + + - None : retain all features (the default). + - 'first' : drop the first category in each feature. If only one + category is present, the feature will be dropped entirely. + - 'if_binary' : drop the first category in each feature with two + categories. Features with 1 or more than 2 categories are + left intact. 
+ - array : ``drop[i]`` is the category in feature ``X[:, i]`` that + should be dropped. + + When `max_categories` or `min_frequency` is configured to group + infrequent categories, the dropping behavior is handled after the + grouping. + + .. versionadded:: 0.21 + The parameter `drop` was added in 0.21. + + .. versionchanged:: 0.23 + The option `drop='if_binary'` was added in 0.23. + + .. versionchanged:: 1.1 + Support for dropping infrequent categories. + + sparse_output : bool, default=True + When ``True``, it returns a :class:`scipy.sparse.csr_matrix`, + i.e. a sparse matrix in "Compressed Sparse Row" (CSR) format. + + .. versionadded:: 1.2 + `sparse` was renamed to `sparse_output` + + dtype : number type, default=np.float64 + Desired dtype of output. + + handle_unknown : {'error', 'ignore', 'infrequent_if_exist'}, \ + default='error' + Specifies the way unknown categories are handled during :meth:`transform`. + + - 'error' : Raise an error if an unknown category is present during transform. + - 'ignore' : When an unknown category is encountered during + transform, the resulting one-hot encoded columns for this feature + will be all zeros. In the inverse transform, an unknown category + will be denoted as None. + - 'infrequent_if_exist' : When an unknown category is encountered + during transform, the resulting one-hot encoded columns for this + feature will map to the infrequent category if it exists. The + infrequent category will be mapped to the last position in the + encoding. During inverse transform, an unknown category will be + mapped to the category denoted `'infrequent'` if it exists. If the + `'infrequent'` category does not exist, then :meth:`transform` and + :meth:`inverse_transform` will handle an unknown category as with + `handle_unknown='ignore'`. Infrequent categories exist based on + `min_frequency` and `max_categories`. Read more in the + :ref:`User Guide `. + + .. versionchanged:: 1.1 + `'infrequent_if_exist'` was added to automatically handle unknown + categories and infrequent categories. + + min_frequency : int or float, default=None + Specifies the minimum frequency below which a category will be + considered infrequent. + + - If `int`, categories with a smaller cardinality will be considered + infrequent. + + - If `float`, categories with a smaller cardinality than + `min_frequency * n_samples` will be considered infrequent. + + .. versionadded:: 1.1 + Read more in the :ref:`User Guide `. + + max_categories : int, default=None + Specifies an upper limit to the number of output features for each input + feature when considering infrequent categories. If there are infrequent + categories, `max_categories` includes the category representing the + infrequent categories along with the frequent categories. If `None`, + there is no limit to the number of output features. + + .. versionadded:: 1.1 + Read more in the :ref:`User Guide `. + + feature_name_combiner : "concat" or callable, default="concat" + Callable with signature `def callable(input_feature, category)` that returns a + string. This is used to create feature names to be returned by + :meth:`get_feature_names_out`. + + `"concat"` concatenates encoded feature name and category with + `feature + "_" + str(category)`.E.g. feature X with values 1, 6, 7 create + feature names `X_1, X_6, X_7`. + + .. 
versionadded:: 1.3 + + Attributes + ---------- + categories_ : list of arrays + The categories of each feature determined during fitting + (in order of the features in X and corresponding with the output + of ``transform``). This includes the category specified in ``drop`` + (if any). + + drop_idx_ : array of shape (n_features,) + - ``drop_idx_[i]`` is the index in ``categories_[i]`` of the category + to be dropped for each feature. + - ``drop_idx_[i] = None`` if no category is to be dropped from the + feature with index ``i``, e.g. when `drop='if_binary'` and the + feature isn't binary. + - ``drop_idx_ = None`` if all the transformed features will be + retained. + + If infrequent categories are enabled by setting `min_frequency` or + `max_categories` to a non-default value and `drop_idx[i]` corresponds + to a infrequent category, then the entire infrequent category is + dropped. + + .. versionchanged:: 0.23 + Added the possibility to contain `None` values. + + infrequent_categories_ : list of ndarray + Defined only if infrequent categories are enabled by setting + `min_frequency` or `max_categories` to a non-default value. + `infrequent_categories_[i]` are the infrequent categories for feature + `i`. If the feature `i` has no infrequent categories + `infrequent_categories_[i]` is None. + + .. versionadded:: 1.1 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 1.0 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + feature_name_combiner : callable or None + Callable with signature `def callable(input_feature, category)` that returns a + string. This is used to create feature names to be returned by + :meth:`get_feature_names_out`. + + .. versionadded:: 1.3 + + See Also + -------- + OrdinalEncoder : Performs an ordinal (integer) + encoding of the categorical features. + TargetEncoder : Encodes categorical features using the target. + sklearn.feature_extraction.DictVectorizer : Performs a one-hot encoding of + dictionary items (also handles string-valued features). + sklearn.feature_extraction.FeatureHasher : Performs an approximate one-hot + encoding of dictionary items or strings. + LabelBinarizer : Binarizes labels in a one-vs-all + fashion. + MultiLabelBinarizer : Transforms between iterable of + iterables and a multilabel format, e.g. a (samples x classes) binary + matrix indicating the presence of a class label. + + Examples + -------- + Given a dataset with two features, we let the encoder find the unique + values per feature and transform the data to a binary one-hot encoding. + + >>> from sklearn.preprocessing import OneHotEncoder + + One can discard categories not seen during `fit`: + + >>> enc = OneHotEncoder(handle_unknown='ignore') + >>> X = [['Male', 1], ['Female', 3], ['Female', 2]] + >>> enc.fit(X) + OneHotEncoder(handle_unknown='ignore') + >>> enc.categories_ + [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)] + >>> enc.transform([['Female', 1], ['Male', 4]]).toarray() + array([[1., 0., 1., 0., 0.], + [0., 1., 0., 0., 0.]]) + >>> enc.inverse_transform([[0, 1, 1, 0, 0], [0, 0, 0, 1, 0]]) + array([['Male', 1], + [None, 2]], dtype=object) + >>> enc.get_feature_names_out(['gender', 'group']) + array(['gender_Female', 'gender_Male', 'group_1', 'group_2', 'group_3'], ...) 
+ + One can always drop the first column for each feature: + + >>> drop_enc = OneHotEncoder(drop='first').fit(X) + >>> drop_enc.categories_ + [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)] + >>> drop_enc.transform([['Female', 1], ['Male', 2]]).toarray() + array([[0., 0., 0.], + [1., 1., 0.]]) + + Or drop a column for feature only having 2 categories: + + >>> drop_binary_enc = OneHotEncoder(drop='if_binary').fit(X) + >>> drop_binary_enc.transform([['Female', 1], ['Male', 2]]).toarray() + array([[0., 1., 0., 0.], + [1., 0., 1., 0.]]) + + One can change the way feature names are created. + + >>> def custom_combiner(feature, category): + ... return str(feature) + "_" + type(category).__name__ + "_" + str(category) + >>> custom_fnames_enc = OneHotEncoder(feature_name_combiner=custom_combiner).fit(X) + >>> custom_fnames_enc.get_feature_names_out() + array(['x0_str_Female', 'x0_str_Male', 'x1_int_1', 'x1_int_2', 'x1_int_3'], + dtype=object) + + Infrequent categories are enabled by setting `max_categories` or `min_frequency`. + + >>> import numpy as np + >>> X = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object).T + >>> ohe = OneHotEncoder(max_categories=3, sparse_output=False).fit(X) + >>> ohe.infrequent_categories_ + [array(['a', 'd'], dtype=object)] + >>> ohe.transform([["a"], ["b"]]) + array([[0., 0., 1.], + [1., 0., 0.]]) + """ + + _parameter_constraints: dict = { + "categories": [StrOptions({"auto"}), list], + "drop": [StrOptions({"first", "if_binary"}), "array-like", None], + "dtype": "no_validation", # validation delegated to numpy + "handle_unknown": [StrOptions({"error", "ignore", "infrequent_if_exist"})], + "max_categories": [Interval(Integral, 1, None, closed="left"), None], + "min_frequency": [ + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="neither"), + None, + ], + "sparse_output": ["boolean"], + "feature_name_combiner": [StrOptions({"concat"}), callable], + } + + def __init__( + self, + *, + categories="auto", + drop=None, + sparse_output=True, + dtype=np.float64, + handle_unknown="error", + min_frequency=None, + max_categories=None, + feature_name_combiner="concat", + ): + self.categories = categories + self.sparse_output = sparse_output + self.dtype = dtype + self.handle_unknown = handle_unknown + self.drop = drop + self.min_frequency = min_frequency + self.max_categories = max_categories + self.feature_name_combiner = feature_name_combiner + + def _map_drop_idx_to_infrequent(self, feature_idx, drop_idx): + """Convert `drop_idx` into the index for infrequent categories. + + If there are no infrequent categories, then `drop_idx` is + returned. This method is called in `_set_drop_idx` when the `drop` + parameter is an array-like. + """ + if not self._infrequent_enabled: + return drop_idx + + default_to_infrequent = self._default_to_infrequent_mappings[feature_idx] + if default_to_infrequent is None: + return drop_idx + + # Raise error when explicitly dropping a category that is infrequent + infrequent_indices = self._infrequent_indices[feature_idx] + if infrequent_indices is not None and drop_idx in infrequent_indices: + categories = self.categories_[feature_idx] + raise ValueError( + f"Unable to drop category {categories[drop_idx].item()!r} from" + f" feature {feature_idx} because it is infrequent" + ) + return default_to_infrequent[drop_idx] + + def _set_drop_idx(self): + """Compute the drop indices associated with `self.categories_`. 
+ + If `self.drop` is: + - `None`, No categories have been dropped. + - `'first'`, All zeros to drop the first category. + - `'if_binary'`, All zeros if the category is binary and `None` + otherwise. + - array-like, The indices of the categories that match the + categories in `self.drop`. If the dropped category is an infrequent + category, then the index for the infrequent category is used. This + means that the entire infrequent category is dropped. + + This methods defines a public `drop_idx_` and a private + `_drop_idx_after_grouping`. + + - `drop_idx_`: Public facing API that references the drop category in + `self.categories_`. + - `_drop_idx_after_grouping`: Used internally to drop categories *after* the + infrequent categories are grouped together. + + If there are no infrequent categories or drop is `None`, then + `drop_idx_=_drop_idx_after_grouping`. + """ + if self.drop is None: + drop_idx_after_grouping = None + elif isinstance(self.drop, str): + if self.drop == "first": + drop_idx_after_grouping = np.zeros(len(self.categories_), dtype=object) + elif self.drop == "if_binary": + n_features_out_no_drop = [len(cat) for cat in self.categories_] + if self._infrequent_enabled: + for i, infreq_idx in enumerate(self._infrequent_indices): + if infreq_idx is None: + continue + n_features_out_no_drop[i] -= infreq_idx.size - 1 + + drop_idx_after_grouping = np.array( + [ + 0 if n_features_out == 2 else None + for n_features_out in n_features_out_no_drop + ], + dtype=object, + ) + + else: + drop_array = np.asarray(self.drop, dtype=object) + droplen = len(drop_array) + + if droplen != len(self.categories_): + msg = ( + "`drop` should have length equal to the number " + "of features ({}), got {}" + ) + raise ValueError(msg.format(len(self.categories_), droplen)) + missing_drops = [] + drop_indices = [] + for feature_idx, (drop_val, cat_list) in enumerate( + zip(drop_array, self.categories_) + ): + if not is_scalar_nan(drop_val): + drop_idx = np.where(cat_list == drop_val)[0] + if drop_idx.size: # found drop idx + drop_indices.append( + self._map_drop_idx_to_infrequent(feature_idx, drop_idx[0]) + ) + else: + missing_drops.append((feature_idx, drop_val)) + continue + + # drop_val is nan, find nan in categories manually + if is_scalar_nan(cat_list[-1]): + drop_indices.append( + self._map_drop_idx_to_infrequent(feature_idx, cat_list.size - 1) + ) + else: # nan is missing + missing_drops.append((feature_idx, drop_val)) + + if any(missing_drops): + msg = ( + "The following categories were supposed to be " + "dropped, but were not found in the training " + "data.\n{}".format( + "\n".join( + [ + "Category: {}, Feature: {}".format(c, v) + for c, v in missing_drops + ] + ) + ) + ) + raise ValueError(msg) + drop_idx_after_grouping = np.array(drop_indices, dtype=object) + + # `_drop_idx_after_grouping` are the categories to drop *after* the infrequent + # categories are grouped together. If needed, we remap `drop_idx` back + # to the categories seen in `self.categories_`. 
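+ # Illustrative sketch (comment added for clarity, not from the original
+ # source): with hypothetical categories_[0] == array(['a', 'b', 'c', 'd'])
+ # where 'b' and 'd' are infrequent, the grouped order is ['a', 'c',
+ # 'infrequent'] and _default_to_infrequent_mappings[0] == [0, 2, 1, 2].
+ # A drop_idx_after_grouping of 1 (the grouped position of 'c') is then
+ # mapped back below via np.flatnonzero(mapping == 1)[0] == 2, i.e. the
+ # position of 'c' in the original categories_[0].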
+ self._drop_idx_after_grouping = drop_idx_after_grouping + + if not self._infrequent_enabled or drop_idx_after_grouping is None: + self.drop_idx_ = self._drop_idx_after_grouping + else: + drop_idx_ = [] + for feature_idx, drop_idx in enumerate(drop_idx_after_grouping): + default_to_infrequent = self._default_to_infrequent_mappings[ + feature_idx + ] + if drop_idx is None or default_to_infrequent is None: + orig_drop_idx = drop_idx + else: + orig_drop_idx = np.flatnonzero(default_to_infrequent == drop_idx)[0] + + drop_idx_.append(orig_drop_idx) + + self.drop_idx_ = np.asarray(drop_idx_, dtype=object) + + def _compute_transformed_categories(self, i, remove_dropped=True): + """Compute the transformed categories used for column `i`. + + 1. If there are infrequent categories, the category is named + 'infrequent_sklearn'. + 2. Dropped columns are removed when remove_dropped=True. + """ + cats = self.categories_[i] + + if self._infrequent_enabled: + infreq_map = self._default_to_infrequent_mappings[i] + if infreq_map is not None: + frequent_mask = infreq_map < infreq_map.max() + infrequent_cat = "infrequent_sklearn" + # infrequent category is always at the end + cats = np.concatenate( + (cats[frequent_mask], np.array([infrequent_cat], dtype=object)) + ) + + if remove_dropped: + cats = self._remove_dropped_categories(cats, i) + return cats + + def _remove_dropped_categories(self, categories, i): + """Remove dropped categories.""" + if ( + self._drop_idx_after_grouping is not None + and self._drop_idx_after_grouping[i] is not None + ): + return np.delete(categories, self._drop_idx_after_grouping[i]) + return categories + + def _compute_n_features_outs(self): + """Compute the n_features_out for each input feature.""" + output = [len(cats) for cats in self.categories_] + + if self._drop_idx_after_grouping is not None: + for i, drop_idx in enumerate(self._drop_idx_after_grouping): + if drop_idx is not None: + output[i] -= 1 + + if not self._infrequent_enabled: + return output + + # infrequent is enabled, the number of features out are reduced + # because the infrequent categories are grouped together + for i, infreq_idx in enumerate(self._infrequent_indices): + if infreq_idx is None: + continue + output[i] -= infreq_idx.size - 1 + + return output + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """ + Fit OneHotEncoder to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to determine the categories of each feature. + + y : None + Ignored. This parameter exists only for compatibility with + :class:`~sklearn.pipeline.Pipeline`. + + Returns + ------- + self + Fitted encoder. + """ + self._fit( + X, + handle_unknown=self.handle_unknown, + force_all_finite="allow-nan", + ) + self._set_drop_idx() + self._n_features_outs = self._compute_n_features_outs() + return self + + def transform(self, X): + """ + Transform X using one-hot encoding. + + If `sparse_output=True` (default), it returns an instance of + :class:`scipy.sparse._csr.csr_matrix` (CSR format). + + If there are infrequent categories for a feature, set by specifying + `max_categories` or `min_frequency`, the infrequent categories are + grouped into a single category. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to encode. + + Returns + ------- + X_out : {ndarray, sparse matrix} of shape \ + (n_samples, n_encoded_features) + Transformed input. If `sparse_output=True`, a sparse matrix will be + returned. 
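+
+ Examples
+ --------
+ Illustrative sketch (not part of the original docstring); the data is
+ hypothetical:
+
+ >>> from sklearn.preprocessing import OneHotEncoder
+ >>> enc = OneHotEncoder(sparse_output=False).fit([['a'], ['b']])
+ >>> enc.transform([['b'], ['a']])
+ array([[0., 1.],
+ [1., 0.]])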
+ """ + check_is_fitted(self) + transform_output = _get_output_config("transform", estimator=self)["dense"] + if transform_output != "default" and self.sparse_output: + capitalize_transform_output = transform_output.capitalize() + raise ValueError( + f"{capitalize_transform_output} output does not support sparse data." + f" Set sparse_output=False to output {transform_output} dataframes or" + f" disable {capitalize_transform_output} output via" + '` ohe.set_output(transform="default").' + ) + + # validation of X happens in _check_X called by _transform + warn_on_unknown = self.drop is not None and self.handle_unknown in { + "ignore", + "infrequent_if_exist", + } + X_int, X_mask = self._transform( + X, + handle_unknown=self.handle_unknown, + force_all_finite="allow-nan", + warn_on_unknown=warn_on_unknown, + ) + + n_samples, n_features = X_int.shape + + if self._drop_idx_after_grouping is not None: + to_drop = self._drop_idx_after_grouping.copy() + # We remove all the dropped categories from mask, and decrement all + # categories that occur after them to avoid an empty column. + keep_cells = X_int != to_drop + for i, cats in enumerate(self.categories_): + # drop='if_binary' but feature isn't binary + if to_drop[i] is None: + # set to cardinality to not drop from X_int + to_drop[i] = len(cats) + + to_drop = to_drop.reshape(1, -1) + X_int[X_int > to_drop] -= 1 + X_mask &= keep_cells + + mask = X_mask.ravel() + feature_indices = np.cumsum([0] + self._n_features_outs) + indices = (X_int + feature_indices[:-1]).ravel()[mask] + + indptr = np.empty(n_samples + 1, dtype=int) + indptr[0] = 0 + np.sum(X_mask, axis=1, out=indptr[1:], dtype=indptr.dtype) + np.cumsum(indptr[1:], out=indptr[1:]) + data = np.ones(indptr[-1]) + + out = sparse.csr_matrix( + (data, indices, indptr), + shape=(n_samples, feature_indices[-1]), + dtype=self.dtype, + ) + if not self.sparse_output: + return out.toarray() + else: + return out + + def inverse_transform(self, X): + """ + Convert the data back to the original representation. + + When unknown categories are encountered (all zeros in the + one-hot encoding), ``None`` is used to represent this category. If the + feature with the unknown category has a dropped category, the dropped + category will be its inverse. + + For a given input feature, if there is an infrequent category, + 'infrequent_sklearn' will be used to represent the infrequent category. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape \ + (n_samples, n_encoded_features) + The transformed data. + + Returns + ------- + X_tr : ndarray of shape (n_samples, n_features) + Inverse transformed array. + """ + check_is_fitted(self) + X = check_array(X, accept_sparse="csr") + + n_samples, _ = X.shape + n_features = len(self.categories_) + + n_features_out = np.sum(self._n_features_outs) + + # validate shape of passed X + msg = ( + "Shape of the passed X data is not correct. Expected {0} columns, got {1}." 
+ ) + if X.shape[1] != n_features_out: + raise ValueError(msg.format(n_features_out, X.shape[1])) + + transformed_features = [ + self._compute_transformed_categories(i, remove_dropped=False) + for i, _ in enumerate(self.categories_) + ] + + # create resulting array of appropriate dtype + dt = np.result_type(*[cat.dtype for cat in transformed_features]) + X_tr = np.empty((n_samples, n_features), dtype=dt) + + j = 0 + found_unknown = {} + + if self._infrequent_enabled: + infrequent_indices = self._infrequent_indices + else: + infrequent_indices = [None] * n_features + + for i in range(n_features): + cats_wo_dropped = self._remove_dropped_categories( + transformed_features[i], i + ) + n_categories = cats_wo_dropped.shape[0] + + # Only happens if there was a column with a unique + # category. In this case we just fill the column with this + # unique category value. + if n_categories == 0: + X_tr[:, i] = self.categories_[i][self._drop_idx_after_grouping[i]] + j += n_categories + continue + sub = X[:, j : j + n_categories] + # for sparse X argmax returns 2D matrix, ensure 1D array + labels = np.asarray(sub.argmax(axis=1)).flatten() + X_tr[:, i] = cats_wo_dropped[labels] + + if self.handle_unknown == "ignore" or ( + self.handle_unknown == "infrequent_if_exist" + and infrequent_indices[i] is None + ): + unknown = np.asarray(sub.sum(axis=1) == 0).flatten() + # ignored unknown categories: we have a row of all zero + if unknown.any(): + # if categories were dropped then unknown categories will + # be mapped to the dropped category + if ( + self._drop_idx_after_grouping is None + or self._drop_idx_after_grouping[i] is None + ): + found_unknown[i] = unknown + else: + X_tr[unknown, i] = self.categories_[i][ + self._drop_idx_after_grouping[i] + ] + else: + dropped = np.asarray(sub.sum(axis=1) == 0).flatten() + if dropped.any(): + if self._drop_idx_after_grouping is None: + all_zero_samples = np.flatnonzero(dropped) + raise ValueError( + f"Samples {all_zero_samples} can not be inverted " + "when drop=None and handle_unknown='error' " + "because they contain all zeros" + ) + # we can safely assume that all of the nulls in each column + # are the dropped value + drop_idx = self._drop_idx_after_grouping[i] + X_tr[dropped, i] = transformed_features[i][drop_idx] + + j += n_categories + + # if ignored are found: potentially need to upcast result to + # insert None values + if found_unknown: + if X_tr.dtype != object: + X_tr = X_tr.astype(object) + + for idx, mask in found_unknown.items(): + X_tr[mask, idx] = None + + return X_tr + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. 
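+
+ Examples
+ --------
+ Illustrative sketch (not part of the original docstring); the data is
+ hypothetical and the default generated names `x0`, `x1` are assumed:
+
+ >>> from sklearn.preprocessing import OneHotEncoder
+ >>> enc = OneHotEncoder().fit([['a', 0], ['b', 1]])
+ >>> enc.get_feature_names_out()
+ array(['x0_a', 'x0_b', 'x1_0', 'x1_1'], dtype=object)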
+ """ + check_is_fitted(self) + input_features = _check_feature_names_in(self, input_features) + cats = [ + self._compute_transformed_categories(i) + for i, _ in enumerate(self.categories_) + ] + + name_combiner = self._check_get_feature_name_combiner() + feature_names = [] + for i in range(len(cats)): + names = [name_combiner(input_features[i], t) for t in cats[i]] + feature_names.extend(names) + + return np.array(feature_names, dtype=object) + + def _check_get_feature_name_combiner(self): + if self.feature_name_combiner == "concat": + return lambda feature, category: feature + "_" + str(category) + else: # callable + dry_run_combiner = self.feature_name_combiner("feature", "category") + if not isinstance(dry_run_combiner, str): + raise TypeError( + "When `feature_name_combiner` is a callable, it should return a " + f"Python string. Got {type(dry_run_combiner)} instead." + ) + return self.feature_name_combiner + + +class OrdinalEncoder(OneToOneFeatureMixin, _BaseEncoder): + """ + Encode categorical features as an integer array. + + The input to this transformer should be an array-like of integers or + strings, denoting the values taken on by categorical (discrete) features. + The features are converted to ordinal integers. This results in + a single column of integers (0 to n_categories - 1) per feature. + + Read more in the :ref:`User Guide `. + For a comparison of different encoders, refer to: + :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder.py`. + + .. versionadded:: 0.20 + + Parameters + ---------- + categories : 'auto' or a list of array-like, default='auto' + Categories (unique values) per feature: + + - 'auto' : Determine categories automatically from the training data. + - list : ``categories[i]`` holds the categories expected in the ith + column. The passed categories should not mix strings and numeric + values, and should be sorted in case of numeric values. + + The used categories can be found in the ``categories_`` attribute. + + dtype : number type, default=np.float64 + Desired dtype of output. + + handle_unknown : {'error', 'use_encoded_value'}, default='error' + When set to 'error' an error will be raised in case an unknown + categorical feature is present during transform. When set to + 'use_encoded_value', the encoded value of unknown categories will be + set to the value given for the parameter `unknown_value`. In + :meth:`inverse_transform`, an unknown category will be denoted as None. + + .. versionadded:: 0.24 + + unknown_value : int or np.nan, default=None + When the parameter handle_unknown is set to 'use_encoded_value', this + parameter is required and will set the encoded value of unknown + categories. It has to be distinct from the values used to encode any of + the categories in `fit`. If set to np.nan, the `dtype` parameter must + be a float dtype. + + .. versionadded:: 0.24 + + encoded_missing_value : int or np.nan, default=np.nan + Encoded value of missing categories. If set to `np.nan`, then the `dtype` + parameter must be a float dtype. + + .. versionadded:: 1.1 + + min_frequency : int or float, default=None + Specifies the minimum frequency below which a category will be + considered infrequent. + + - If `int`, categories with a smaller cardinality will be considered + infrequent. + + - If `float`, categories with a smaller cardinality than + `min_frequency * n_samples` will be considered infrequent. + + .. versionadded:: 1.3 + Read more in the :ref:`User Guide `. 
+ + max_categories : int, default=None + Specifies an upper limit to the number of output categories for each input + feature when considering infrequent categories. If there are infrequent + categories, `max_categories` includes the category representing the + infrequent categories along with the frequent categories. If `None`, + there is no limit to the number of output features. + + `max_categories` do **not** take into account missing or unknown + categories. Setting `unknown_value` or `encoded_missing_value` to an + integer will increase the number of unique integer codes by one each. + This can result in up to `max_categories + 2` integer codes. + + .. versionadded:: 1.3 + Read more in the :ref:`User Guide `. + + Attributes + ---------- + categories_ : list of arrays + The categories of each feature determined during ``fit`` (in order of + the features in X and corresponding with the output of ``transform``). + This does not include categories that weren't seen during ``fit``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 1.0 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + infrequent_categories_ : list of ndarray + Defined only if infrequent categories are enabled by setting + `min_frequency` or `max_categories` to a non-default value. + `infrequent_categories_[i]` are the infrequent categories for feature + `i`. If the feature `i` has no infrequent categories + `infrequent_categories_[i]` is None. + + .. versionadded:: 1.3 + + See Also + -------- + OneHotEncoder : Performs a one-hot encoding of categorical features. This encoding + is suitable for low to medium cardinality categorical variables, both in + supervised and unsupervised settings. + TargetEncoder : Encodes categorical features using supervised signal + in a classification or regression pipeline. This encoding is typically + suitable for high cardinality categorical variables. + LabelEncoder : Encodes target labels with values between 0 and + ``n_classes-1``. + + Notes + ----- + With a high proportion of `nan` values, inferring categories becomes slow with + Python versions before 3.10. The handling of `nan` values was improved + from Python 3.10 onwards, (c.f. + `bpo-43475 `_). + + Examples + -------- + Given a dataset with two features, we let the encoder find the unique + values per feature and transform the data to an ordinal encoding. + + >>> from sklearn.preprocessing import OrdinalEncoder + >>> enc = OrdinalEncoder() + >>> X = [['Male', 1], ['Female', 3], ['Female', 2]] + >>> enc.fit(X) + OrdinalEncoder() + >>> enc.categories_ + [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)] + >>> enc.transform([['Female', 3], ['Male', 1]]) + array([[0., 2.], + [1., 0.]]) + + >>> enc.inverse_transform([[1, 0], [0, 1]]) + array([['Male', 1], + ['Female', 2]], dtype=object) + + By default, :class:`OrdinalEncoder` is lenient towards missing values by + propagating them. + + >>> import numpy as np + >>> X = [['Male', 1], ['Female', 3], ['Female', np.nan]] + >>> enc.fit_transform(X) + array([[ 1., 0.], + [ 0., 1.], + [ 0., nan]]) + + You can use the parameter `encoded_missing_value` to encode missing values. + + >>> enc.set_params(encoded_missing_value=-1).fit_transform(X) + array([[ 1., 0.], + [ 0., 1.], + [ 0., -1.]]) + + Infrequent categories are enabled by setting `max_categories` or `min_frequency`. 
+ In the following example, "a" and "d" are considered infrequent and grouped + together into a single category, "b" and "c" are their own categories, unknown + values are encoded as 3 and missing values are encoded as 4. + + >>> X_train = np.array( + ... [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3 + [np.nan]], + ... dtype=object).T + >>> enc = OrdinalEncoder( + ... handle_unknown="use_encoded_value", unknown_value=3, + ... max_categories=3, encoded_missing_value=4) + >>> _ = enc.fit(X_train) + >>> X_test = np.array([["a"], ["b"], ["c"], ["d"], ["e"], [np.nan]], dtype=object) + >>> enc.transform(X_test) + array([[2.], + [0.], + [1.], + [2.], + [3.], + [4.]]) + """ + + _parameter_constraints: dict = { + "categories": [StrOptions({"auto"}), list], + "dtype": "no_validation", # validation delegated to numpy + "encoded_missing_value": [Integral, type(np.nan)], + "handle_unknown": [StrOptions({"error", "use_encoded_value"})], + "unknown_value": [Integral, type(np.nan), None], + "max_categories": [Interval(Integral, 1, None, closed="left"), None], + "min_frequency": [ + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="neither"), + None, + ], + } + + def __init__( + self, + *, + categories="auto", + dtype=np.float64, + handle_unknown="error", + unknown_value=None, + encoded_missing_value=np.nan, + min_frequency=None, + max_categories=None, + ): + self.categories = categories + self.dtype = dtype + self.handle_unknown = handle_unknown + self.unknown_value = unknown_value + self.encoded_missing_value = encoded_missing_value + self.min_frequency = min_frequency + self.max_categories = max_categories + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """ + Fit the OrdinalEncoder to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to determine the categories of each feature. + + y : None + Ignored. This parameter exists only for compatibility with + :class:`~sklearn.pipeline.Pipeline`. + + Returns + ------- + self : object + Fitted encoder. + """ + if self.handle_unknown == "use_encoded_value": + if is_scalar_nan(self.unknown_value): + if np.dtype(self.dtype).kind != "f": + raise ValueError( + "When unknown_value is np.nan, the dtype " + "parameter should be " + f"a float dtype. Got {self.dtype}." + ) + elif not isinstance(self.unknown_value, numbers.Integral): + raise TypeError( + "unknown_value should be an integer or " + "np.nan when " + "handle_unknown is 'use_encoded_value', " + f"got {self.unknown_value}." + ) + elif self.unknown_value is not None: + raise TypeError( + "unknown_value should only be set when " + "handle_unknown is 'use_encoded_value', " + f"got {self.unknown_value}." 
+ ) + + # `_fit` will only raise an error when `self.handle_unknown="error"` + fit_results = self._fit( + X, + handle_unknown=self.handle_unknown, + force_all_finite="allow-nan", + return_and_ignore_missing_for_infrequent=True, + ) + self._missing_indices = fit_results["missing_indices"] + + cardinalities = [len(categories) for categories in self.categories_] + if self._infrequent_enabled: + # Cardinality decreases because the infrequent categories are grouped + # together + for feature_idx, infrequent in enumerate(self.infrequent_categories_): + if infrequent is not None: + cardinalities[feature_idx] -= len(infrequent) + + # missing values are not considered part of the cardinality + # when considering unknown categories or encoded_missing_value + for cat_idx, categories_for_idx in enumerate(self.categories_): + if is_scalar_nan(categories_for_idx[-1]): + cardinalities[cat_idx] -= 1 + + if self.handle_unknown == "use_encoded_value": + for cardinality in cardinalities: + if 0 <= self.unknown_value < cardinality: + raise ValueError( + "The used value for unknown_value " + f"{self.unknown_value} is one of the " + "values already used for encoding the " + "seen categories." + ) + + if self._missing_indices: + if np.dtype(self.dtype).kind != "f" and is_scalar_nan( + self.encoded_missing_value + ): + raise ValueError( + "There are missing values in features " + f"{list(self._missing_indices)}. For OrdinalEncoder to " + f"encode missing values with dtype: {self.dtype}, set " + "encoded_missing_value to a non-nan value, or " + "set dtype to a float" + ) + + if not is_scalar_nan(self.encoded_missing_value): + # Features are invalid when they contain a missing category + # and encoded_missing_value was already used to encode a + # known category + invalid_features = [ + cat_idx + for cat_idx, cardinality in enumerate(cardinalities) + if cat_idx in self._missing_indices + and 0 <= self.encoded_missing_value < cardinality + ] + + if invalid_features: + # Use feature names if they are available + if hasattr(self, "feature_names_in_"): + invalid_features = self.feature_names_in_[invalid_features] + raise ValueError( + f"encoded_missing_value ({self.encoded_missing_value}) " + "is already used to encode a known category in features: " + f"{invalid_features}" + ) + + return self + + def transform(self, X): + """ + Transform X to ordinal codes. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to encode. + + Returns + ------- + X_out : ndarray of shape (n_samples, n_features) + Transformed input. + """ + check_is_fitted(self, "categories_") + X_int, X_mask = self._transform( + X, + handle_unknown=self.handle_unknown, + force_all_finite="allow-nan", + ignore_category_indices=self._missing_indices, + ) + X_trans = X_int.astype(self.dtype, copy=False) + + for cat_idx, missing_idx in self._missing_indices.items(): + X_missing_mask = X_int[:, cat_idx] == missing_idx + X_trans[X_missing_mask, cat_idx] = self.encoded_missing_value + + # create separate category for unknown values + if self.handle_unknown == "use_encoded_value": + X_trans[~X_mask] = self.unknown_value + return X_trans + + def inverse_transform(self, X): + """ + Convert the data back to the original representation. + + Parameters + ---------- + X : array-like of shape (n_samples, n_encoded_features) + The transformed data. + + Returns + ------- + X_tr : ndarray of shape (n_samples, n_features) + Inverse transformed array. 
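+
+ Examples
+ --------
+ Illustrative sketch (not part of the original docstring); the data is
+ hypothetical:
+
+ >>> from sklearn.preprocessing import OrdinalEncoder
+ >>> enc = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
+ >>> _ = enc.fit([['a'], ['b']])
+ >>> enc.inverse_transform([[1.0], [0.0], [-1.0]])
+ array([['b'],
+ ['a'],
+ [None]], dtype=object)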
+ """ + check_is_fitted(self) + X = check_array(X, force_all_finite="allow-nan") + + n_samples, _ = X.shape + n_features = len(self.categories_) + + # validate shape of passed X + msg = ( + "Shape of the passed X data is not correct. Expected {0} columns, got {1}." + ) + if X.shape[1] != n_features: + raise ValueError(msg.format(n_features, X.shape[1])) + + # create resulting array of appropriate dtype + dt = np.result_type(*[cat.dtype for cat in self.categories_]) + X_tr = np.empty((n_samples, n_features), dtype=dt) + + found_unknown = {} + infrequent_masks = {} + + infrequent_indices = getattr(self, "_infrequent_indices", None) + + for i in range(n_features): + labels = X[:, i] + + # replace values of X[:, i] that were nan with actual indices + if i in self._missing_indices: + X_i_mask = _get_mask(labels, self.encoded_missing_value) + labels[X_i_mask] = self._missing_indices[i] + + rows_to_update = slice(None) + categories = self.categories_[i] + + if infrequent_indices is not None and infrequent_indices[i] is not None: + # Compute mask for frequent categories + infrequent_encoding_value = len(categories) - len(infrequent_indices[i]) + infrequent_masks[i] = labels == infrequent_encoding_value + rows_to_update = ~infrequent_masks[i] + + # Remap categories to be only frequent categories. The infrequent + # categories will be mapped to "infrequent_sklearn" later + frequent_categories_mask = np.ones_like(categories, dtype=bool) + frequent_categories_mask[infrequent_indices[i]] = False + categories = categories[frequent_categories_mask] + + if self.handle_unknown == "use_encoded_value": + unknown_labels = _get_mask(labels, self.unknown_value) + found_unknown[i] = unknown_labels + + known_labels = ~unknown_labels + if isinstance(rows_to_update, np.ndarray): + rows_to_update &= known_labels + else: + rows_to_update = known_labels + + labels_int = labels[rows_to_update].astype("int64", copy=False) + X_tr[rows_to_update, i] = categories[labels_int] + + if found_unknown or infrequent_masks: + X_tr = X_tr.astype(object, copy=False) + + # insert None values for unknown values + if found_unknown: + for idx, mask in found_unknown.items(): + X_tr[mask, idx] = None + + if infrequent_masks: + for idx, mask in infrequent_masks.items(): + X_tr[mask, idx] = "infrequent_sklearn" + + return X_tr diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_function_transformer.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_function_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..921bd6a01fb713e9452fd7a934f48b22295ed1b1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_function_transformer.py @@ -0,0 +1,431 @@ +import warnings + +import numpy as np + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils._param_validation import StrOptions +from ..utils._set_output import ADAPTERS_MANAGER, _get_output_config +from ..utils.metaestimators import available_if +from ..utils.validation import ( + _allclose_dense_sparse, + _check_feature_names_in, + _get_feature_names, + _is_pandas_df, + _is_polars_df, + check_array, +) + + +def _get_adapter_from_container(container): + """Get the adapter that nows how to handle such container. + + See :class:`sklearn.utils._set_output.ContainerAdapterProtocol` for more + details. 
+ """ + module_name = container.__class__.__module__.split(".")[0] + try: + return ADAPTERS_MANAGER.adapters[module_name] + except KeyError as exc: + available_adapters = list(ADAPTERS_MANAGER.adapters.keys()) + raise ValueError( + "The container does not have a registered adapter in scikit-learn. " + f"Available adapters are: {available_adapters} while the container " + f"provided is: {container!r}." + ) from exc + + +def _identity(X): + """The identity function.""" + return X + + +class FunctionTransformer(TransformerMixin, BaseEstimator): + """Constructs a transformer from an arbitrary callable. + + A FunctionTransformer forwards its X (and optionally y) arguments to a + user-defined function or function object and returns the result of this + function. This is useful for stateless transformations such as taking the + log of frequencies, doing custom scaling, etc. + + Note: If a lambda is used as the function, then the resulting + transformer will not be pickleable. + + .. versionadded:: 0.17 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + func : callable, default=None + The callable to use for the transformation. This will be passed + the same arguments as transform, with args and kwargs forwarded. + If func is None, then func will be the identity function. + + inverse_func : callable, default=None + The callable to use for the inverse transformation. This will be + passed the same arguments as inverse transform, with args and + kwargs forwarded. If inverse_func is None, then inverse_func + will be the identity function. + + validate : bool, default=False + Indicate that the input X array should be checked before calling + ``func``. The possibilities are: + + - If False, there is no input validation. + - If True, then X will be converted to a 2-dimensional NumPy array or + sparse matrix. If the conversion is not possible an exception is + raised. + + .. versionchanged:: 0.22 + The default of ``validate`` changed from True to False. + + accept_sparse : bool, default=False + Indicate that func accepts a sparse matrix as input. If validate is + False, this has no effect. Otherwise, if accept_sparse is false, + sparse matrix inputs will cause an exception to be raised. + + check_inverse : bool, default=True + Whether to check that or ``func`` followed by ``inverse_func`` leads to + the original inputs. It can be used for a sanity check, raising a + warning when the condition is not fulfilled. + + .. versionadded:: 0.20 + + feature_names_out : callable, 'one-to-one' or None, default=None + Determines the list of feature names that will be returned by the + `get_feature_names_out` method. If it is 'one-to-one', then the output + feature names will be equal to the input feature names. If it is a + callable, then it must take two positional arguments: this + `FunctionTransformer` (`self`) and an array-like of input feature names + (`input_features`). It must return an array-like of output feature + names. The `get_feature_names_out` method is only defined if + `feature_names_out` is not None. + + See ``get_feature_names_out`` for more details. + + .. versionadded:: 1.1 + + kw_args : dict, default=None + Dictionary of additional keyword arguments to pass to func. + + .. versionadded:: 0.18 + + inv_kw_args : dict, default=None + Dictionary of additional keyword arguments to pass to inverse_func. + + .. versionadded:: 0.18 + + Attributes + ---------- + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` has feature + names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + MaxAbsScaler : Scale each feature by its maximum absolute value. + StandardScaler : Standardize features by removing the mean and + scaling to unit variance. + LabelBinarizer : Binarize labels in a one-vs-all fashion. + MultiLabelBinarizer : Transform between iterable of iterables + and a multilabel format. + + Notes + ----- + If `func` returns an output with a `columns` attribute, then the columns is enforced + to be consistent with the output of `get_feature_names_out`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.preprocessing import FunctionTransformer + >>> transformer = FunctionTransformer(np.log1p) + >>> X = np.array([[0, 1], [2, 3]]) + >>> transformer.transform(X) + array([[0. , 0.6931...], + [1.0986..., 1.3862...]]) + """ + + _parameter_constraints: dict = { + "func": [callable, None], + "inverse_func": [callable, None], + "validate": ["boolean"], + "accept_sparse": ["boolean"], + "check_inverse": ["boolean"], + "feature_names_out": [callable, StrOptions({"one-to-one"}), None], + "kw_args": [dict, None], + "inv_kw_args": [dict, None], + } + + def __init__( + self, + func=None, + inverse_func=None, + *, + validate=False, + accept_sparse=False, + check_inverse=True, + feature_names_out=None, + kw_args=None, + inv_kw_args=None, + ): + self.func = func + self.inverse_func = inverse_func + self.validate = validate + self.accept_sparse = accept_sparse + self.check_inverse = check_inverse + self.feature_names_out = feature_names_out + self.kw_args = kw_args + self.inv_kw_args = inv_kw_args + + def _check_input(self, X, *, reset): + if self.validate: + return self._validate_data(X, accept_sparse=self.accept_sparse, reset=reset) + elif reset: + # Set feature_names_in_ and n_features_in_ even if validate=False + # We run this only when reset==True to store the attributes but not + # validate them, because validate=False + self._check_n_features(X, reset=reset) + self._check_feature_names(X, reset=reset) + return X + + def _check_inverse_transform(self, X): + """Check that func and inverse_func are the inverse.""" + idx_selected = slice(None, None, max(1, X.shape[0] // 100)) + X_round_trip = self.inverse_transform(self.transform(X[idx_selected])) + + if hasattr(X, "dtype"): + dtypes = [X.dtype] + elif hasattr(X, "dtypes"): + # Dataframes can have multiple dtypes + dtypes = X.dtypes + + if not all(np.issubdtype(d, np.number) for d in dtypes): + raise ValueError( + "'check_inverse' is only supported when all the elements in `X` is" + " numerical." + ) + + if not _allclose_dense_sparse(X[idx_selected], X_round_trip): + warnings.warn( + ( + "The provided functions are not strictly" + " inverse of each other. If you are sure you" + " want to proceed regardless, set" + " 'check_inverse=False'." + ), + UserWarning, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit transformer by checking X. + + If ``validate`` is ``True``, ``X`` will be checked. + + Parameters + ---------- + X : {array-like, sparse-matrix} of shape (n_samples, n_features) \ + if `validate=True` else any object that `func` can handle + Input array. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + FunctionTransformer class instance. 
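+
+ Examples
+ --------
+ Illustrative sketch (not part of the original docstring); the functions are
+ hypothetical but are exact inverses, so the default `check_inverse`
+ round-trip check passes without a warning:
+
+ >>> import numpy as np
+ >>> from sklearn.preprocessing import FunctionTransformer
+ >>> transformer = FunctionTransformer(np.expm1, inverse_func=np.log1p)
+ >>> _ = transformer.fit(np.array([[0.0, 1.0], [2.0, 3.0]]))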
+ """ + X = self._check_input(X, reset=True) + if self.check_inverse and not (self.func is None or self.inverse_func is None): + self._check_inverse_transform(X) + return self + + def transform(self, X): + """Transform X using the forward function. + + Parameters + ---------- + X : {array-like, sparse-matrix} of shape (n_samples, n_features) \ + if `validate=True` else any object that `func` can handle + Input array. + + Returns + ------- + X_out : array-like, shape (n_samples, n_features) + Transformed input. + """ + X = self._check_input(X, reset=False) + out = self._transform(X, func=self.func, kw_args=self.kw_args) + output_config = _get_output_config("transform", self)["dense"] + + if hasattr(out, "columns") and self.feature_names_out is not None: + # check the consistency between the column provided by `transform` and + # the the column names provided by `get_feature_names_out`. + feature_names_out = self.get_feature_names_out() + if list(out.columns) != list(feature_names_out): + # we can override the column names of the output if it is inconsistent + # with the column names provided by `get_feature_names_out` in the + # following cases: + # * `func` preserved the column names between the input and the output + # * the input column names are all numbers + # * the output is requested to be a DataFrame (pandas or polars) + feature_names_in = getattr( + X, "feature_names_in_", _get_feature_names(X) + ) + same_feature_names_in_out = feature_names_in is not None and list( + feature_names_in + ) == list(out.columns) + not_all_str_columns = not all( + isinstance(col, str) for col in out.columns + ) + if same_feature_names_in_out or not_all_str_columns: + adapter = _get_adapter_from_container(out) + out = adapter.create_container( + X_output=out, + X_original=out, + columns=feature_names_out, + inplace=False, + ) + else: + raise ValueError( + "The output generated by `func` have different column names " + "than the ones provided by `get_feature_names_out`. " + f"Got output with columns names: {list(out.columns)} and " + "`get_feature_names_out` returned: " + f"{list(self.get_feature_names_out())}. " + "The column names can be overridden by setting " + "`set_output(transform='pandas')` or " + "`set_output(transform='polars')` such that the column names " + "are set to the names provided by `get_feature_names_out`." + ) + + if self.feature_names_out is None: + warn_msg = ( + "When `set_output` is configured to be '{0}', `func` should return " + "a {0} DataFrame to follow the `set_output` API or `feature_names_out`" + " should be defined." + ) + if output_config == "pandas" and not _is_pandas_df(out): + warnings.warn(warn_msg.format("pandas")) + elif output_config == "polars" and not _is_polars_df(out): + warnings.warn(warn_msg.format("polars")) + + return out + + def inverse_transform(self, X): + """Transform X using the inverse function. + + Parameters + ---------- + X : {array-like, sparse-matrix} of shape (n_samples, n_features) \ + if `validate=True` else any object that `inverse_func` can handle + Input array. + + Returns + ------- + X_out : array-like, shape (n_samples, n_features) + Transformed input. + """ + if self.validate: + X = check_array(X, accept_sparse=self.accept_sparse) + return self._transform(X, func=self.inverse_func, kw_args=self.inv_kw_args) + + @available_if(lambda self: self.feature_names_out is not None) + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. 
+ + This method is only defined if `feature_names_out` is not None. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input feature names. + + - If `input_features` is None, then `feature_names_in_` is + used as the input feature names. If `feature_names_in_` is not + defined, then names are generated: + `[x0, x1, ..., x(n_features_in_ - 1)]`. + - If `input_features` is array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + + - If `feature_names_out` is 'one-to-one', the input feature names + are returned (see `input_features` above). This requires + `feature_names_in_` and/or `n_features_in_` to be defined, which + is done automatically if `validate=True`. Alternatively, you can + set them in `func`. + - If `feature_names_out` is a callable, then it is called with two + arguments, `self` and `input_features`, and its return value is + returned by this method. + """ + if hasattr(self, "n_features_in_") or input_features is not None: + input_features = _check_feature_names_in(self, input_features) + if self.feature_names_out == "one-to-one": + names_out = input_features + elif callable(self.feature_names_out): + names_out = self.feature_names_out(self, input_features) + else: + raise ValueError( + f"feature_names_out={self.feature_names_out!r} is invalid. " + 'It must either be "one-to-one" or a callable with two ' + "arguments: the function transformer and an array-like of " + "input feature names. The callable must return an array-like " + "of output feature names." + ) + return np.asarray(names_out, dtype=object) + + def _transform(self, X, func=None, kw_args=None): + if func is None: + func = _identity + + return func(X, **(kw_args if kw_args else {})) + + def __sklearn_is_fitted__(self): + """Return True since FunctionTransfomer is stateless.""" + return True + + def _more_tags(self): + return {"no_validation": not self.validate, "stateless": True} + + def set_output(self, *, transform=None): + """Set output container. + + See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py` + for an example on how to use the API. + + Parameters + ---------- + transform : {"default", "pandas"}, default=None + Configure output of `transform` and `fit_transform`. + + - `"default"`: Default output format of a transformer + - `"pandas"`: DataFrame output + - `"polars"`: Polars output + - `None`: Transform configuration is unchanged + + .. versionadded:: 1.4 + `"polars"` option was added. + + Returns + ------- + self : estimator instance + Estimator instance. 
+ """ + if not hasattr(self, "_sklearn_output_config"): + self._sklearn_output_config = {} + + self._sklearn_output_config["transform"] = transform + return self diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_label.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_label.py new file mode 100644 index 0000000000000000000000000000000000000000..bd009d52a685398c6f94fe1019c8437e95b98313 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_label.py @@ -0,0 +1,951 @@ +# Authors: Alexandre Gramfort +# Mathieu Blondel +# Olivier Grisel +# Andreas Mueller +# Joel Nothman +# Hamzeh Alsalhi +# License: BSD 3 clause + +import array +import itertools +import warnings +from collections import defaultdict +from numbers import Integral + +import numpy as np +import scipy.sparse as sp + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils import column_or_1d +from ..utils._encode import _encode, _unique +from ..utils._param_validation import Interval, validate_params +from ..utils.multiclass import type_of_target, unique_labels +from ..utils.sparsefuncs import min_max_axis +from ..utils.validation import _num_samples, check_array, check_is_fitted + +__all__ = [ + "label_binarize", + "LabelBinarizer", + "LabelEncoder", + "MultiLabelBinarizer", +] + + +class LabelEncoder(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None): + """Encode target labels with value between 0 and n_classes-1. + + This transformer should be used to encode target values, *i.e.* `y`, and + not the input `X`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.12 + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + Holds the label for each class. + + See Also + -------- + OrdinalEncoder : Encode categorical features using an ordinal encoding + scheme. + OneHotEncoder : Encode categorical features as a one-hot numeric array. + + Examples + -------- + `LabelEncoder` can be used to normalize labels. + + >>> from sklearn.preprocessing import LabelEncoder + >>> le = LabelEncoder() + >>> le.fit([1, 2, 2, 6]) + LabelEncoder() + >>> le.classes_ + array([1, 2, 6]) + >>> le.transform([1, 1, 2, 6]) + array([0, 0, 1, 2]...) + >>> le.inverse_transform([0, 0, 1, 2]) + array([1, 1, 2, 6]) + + It can also be used to transform non-numerical labels (as long as they are + hashable and comparable) to numerical labels. + + >>> le = LabelEncoder() + >>> le.fit(["paris", "paris", "tokyo", "amsterdam"]) + LabelEncoder() + >>> list(le.classes_) + ['amsterdam', 'paris', 'tokyo'] + >>> le.transform(["tokyo", "tokyo", "paris"]) + array([2, 2, 1]...) + >>> list(le.inverse_transform([2, 2, 1])) + ['tokyo', 'tokyo', 'paris'] + """ + + def fit(self, y): + """Fit label encoder. + + Parameters + ---------- + y : array-like of shape (n_samples,) + Target values. + + Returns + ------- + self : returns an instance of self. + Fitted label encoder. + """ + y = column_or_1d(y, warn=True) + self.classes_ = _unique(y) + return self + + def fit_transform(self, y): + """Fit label encoder and return encoded labels. + + Parameters + ---------- + y : array-like of shape (n_samples,) + Target values. + + Returns + ------- + y : array-like of shape (n_samples,) + Encoded labels. + """ + y = column_or_1d(y, warn=True) + self.classes_, y = _unique(y, return_inverse=True) + return y + + def transform(self, y): + """Transform labels to normalized encoding. + + Parameters + ---------- + y : array-like of shape (n_samples,) + Target values. 
+ + Returns + ------- + y : array-like of shape (n_samples,) + Labels as normalized encodings. + """ + check_is_fitted(self) + y = column_or_1d(y, dtype=self.classes_.dtype, warn=True) + # transform of empty array is empty array + if _num_samples(y) == 0: + return np.array([]) + + return _encode(y, uniques=self.classes_) + + def inverse_transform(self, y): + """Transform labels back to original encoding. + + Parameters + ---------- + y : ndarray of shape (n_samples,) + Target values. + + Returns + ------- + y : ndarray of shape (n_samples,) + Original encoding. + """ + check_is_fitted(self) + y = column_or_1d(y, warn=True) + # inverse transform of empty array is empty array + if _num_samples(y) == 0: + return np.array([]) + + diff = np.setdiff1d(y, np.arange(len(self.classes_))) + if len(diff): + raise ValueError("y contains previously unseen labels: %s" % str(diff)) + y = np.asarray(y) + return self.classes_[y] + + def _more_tags(self): + return {"X_types": ["1dlabels"]} + + +class LabelBinarizer(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None): + """Binarize labels in a one-vs-all fashion. + + Several regression and binary classification algorithms are + available in scikit-learn. A simple way to extend these algorithms + to the multi-class classification case is to use the so-called + one-vs-all scheme. + + At learning time, this simply consists in learning one regressor + or binary classifier per class. In doing so, one needs to convert + multi-class labels to binary labels (belong or does not belong + to the class). `LabelBinarizer` makes this process easy with the + transform method. + + At prediction time, one assigns the class for which the corresponding + model gave the greatest confidence. `LabelBinarizer` makes this easy + with the :meth:`inverse_transform` method. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + neg_label : int, default=0 + Value with which negative labels must be encoded. + + pos_label : int, default=1 + Value with which positive labels must be encoded. + + sparse_output : bool, default=False + True if the returned array from transform is desired to be in sparse + CSR format. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + Holds the label for each class. + + y_type_ : str + Represents the type of the target data as evaluated by + :func:`~sklearn.utils.multiclass.type_of_target`. Possible type are + 'continuous', 'continuous-multioutput', 'binary', 'multiclass', + 'multiclass-multioutput', 'multilabel-indicator', and 'unknown'. + + sparse_input_ : bool + `True` if the input data to transform is given as a sparse matrix, + `False` otherwise. + + See Also + -------- + label_binarize : Function to perform the transform operation of + LabelBinarizer with fixed classes. + OneHotEncoder : Encode categorical features using a one-hot aka one-of-K + scheme. 
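# --- Illustrative sketch (editorial, not part of the patch): the
# transform/inverse_transform pair shown above maps labels to the range
# [0, n_classes) and back; encoded values outside that range raise.
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder().fit(["cat", "dog", "mouse"])
print(le.transform(["dog", "cat"]))       # [1 0]
print(le.inverse_transform([2, 2, 0]))    # ['mouse' 'mouse' 'cat']
try:
    le.inverse_transform([3])             # 3 is not a valid encoded label
except ValueError as exc:
    print(exc)                            # y contains previously unseen labels: [3]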
+ + Examples + -------- + >>> from sklearn.preprocessing import LabelBinarizer + >>> lb = LabelBinarizer() + >>> lb.fit([1, 2, 6, 4, 2]) + LabelBinarizer() + >>> lb.classes_ + array([1, 2, 4, 6]) + >>> lb.transform([1, 6]) + array([[1, 0, 0, 0], + [0, 0, 0, 1]]) + + Binary targets transform to a column vector + + >>> lb = LabelBinarizer() + >>> lb.fit_transform(['yes', 'no', 'no', 'yes']) + array([[1], + [0], + [0], + [1]]) + + Passing a 2D matrix for multilabel classification + + >>> import numpy as np + >>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]])) + LabelBinarizer() + >>> lb.classes_ + array([0, 1, 2]) + >>> lb.transform([0, 1, 2, 1]) + array([[1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, 1, 0]]) + """ + + _parameter_constraints: dict = { + "neg_label": [Integral], + "pos_label": [Integral], + "sparse_output": ["boolean"], + } + + def __init__(self, *, neg_label=0, pos_label=1, sparse_output=False): + self.neg_label = neg_label + self.pos_label = pos_label + self.sparse_output = sparse_output + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, y): + """Fit label binarizer. + + Parameters + ---------- + y : ndarray of shape (n_samples,) or (n_samples, n_classes) + Target values. The 2-d matrix should only contain 0 and 1, + represents multilabel classification. + + Returns + ------- + self : object + Returns the instance itself. + """ + if self.neg_label >= self.pos_label: + raise ValueError( + f"neg_label={self.neg_label} must be strictly less than " + f"pos_label={self.pos_label}." + ) + + if self.sparse_output and (self.pos_label == 0 or self.neg_label != 0): + raise ValueError( + "Sparse binarization is only supported with non " + "zero pos_label and zero neg_label, got " + f"pos_label={self.pos_label} and neg_label={self.neg_label}" + ) + + self.y_type_ = type_of_target(y, input_name="y") + + if "multioutput" in self.y_type_: + raise ValueError( + "Multioutput target data is not supported with label binarization" + ) + if _num_samples(y) == 0: + raise ValueError("y has 0 samples: %r" % y) + + self.sparse_input_ = sp.issparse(y) + self.classes_ = unique_labels(y) + return self + + def fit_transform(self, y): + """Fit label binarizer/transform multi-class labels to binary labels. + + The output of transform is sometimes referred to as + the 1-of-K coding scheme. + + Parameters + ---------- + y : {ndarray, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_classes) + Target values. The 2-d matrix should only contain 0 and 1, + represents multilabel classification. Sparse matrix can be + CSR, CSC, COO, DOK, or LIL. + + Returns + ------- + Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) + Shape will be (n_samples, 1) for binary problems. Sparse matrix + will be of CSR format. + """ + return self.fit(y).transform(y) + + def transform(self, y): + """Transform multi-class labels to binary labels. + + The output of transform is sometimes referred to by some authors as + the 1-of-K coding scheme. + + Parameters + ---------- + y : {array, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_classes) + Target values. The 2-d matrix should only contain 0 and 1, + represents multilabel classification. Sparse matrix can be + CSR, CSC, COO, DOK, or LIL. + + Returns + ------- + Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) + Shape will be (n_samples, 1) for binary problems. Sparse matrix + will be of CSR format. 
+ """ + check_is_fitted(self) + + y_is_multilabel = type_of_target(y).startswith("multilabel") + if y_is_multilabel and not self.y_type_.startswith("multilabel"): + raise ValueError("The object was not fitted with multilabel input.") + + return label_binarize( + y, + classes=self.classes_, + pos_label=self.pos_label, + neg_label=self.neg_label, + sparse_output=self.sparse_output, + ) + + def inverse_transform(self, Y, threshold=None): + """Transform binary labels back to multi-class labels. + + Parameters + ---------- + Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) + Target values. All sparse matrices are converted to CSR before + inverse transformation. + + threshold : float, default=None + Threshold used in the binary and multi-label cases. + + Use 0 when ``Y`` contains the output of :term:`decision_function` + (classifier). + Use 0.5 when ``Y`` contains the output of :term:`predict_proba`. + + If None, the threshold is assumed to be half way between + neg_label and pos_label. + + Returns + ------- + y : {ndarray, sparse matrix} of shape (n_samples,) + Target values. Sparse matrix will be of CSR format. + + Notes + ----- + In the case when the binary labels are fractional + (probabilistic), :meth:`inverse_transform` chooses the class with the + greatest value. Typically, this allows to use the output of a + linear model's :term:`decision_function` method directly as the input + of :meth:`inverse_transform`. + """ + check_is_fitted(self) + + if threshold is None: + threshold = (self.pos_label + self.neg_label) / 2.0 + + if self.y_type_ == "multiclass": + y_inv = _inverse_binarize_multiclass(Y, self.classes_) + else: + y_inv = _inverse_binarize_thresholding( + Y, self.y_type_, self.classes_, threshold + ) + + if self.sparse_input_: + y_inv = sp.csr_matrix(y_inv) + elif sp.issparse(y_inv): + y_inv = y_inv.toarray() + + return y_inv + + def _more_tags(self): + return {"X_types": ["1dlabels"]} + + +@validate_params( + { + "y": ["array-like"], + "classes": ["array-like"], + "neg_label": [Interval(Integral, None, None, closed="neither")], + "pos_label": [Interval(Integral, None, None, closed="neither")], + "sparse_output": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def label_binarize(y, *, classes, neg_label=0, pos_label=1, sparse_output=False): + """Binarize labels in a one-vs-all fashion. + + Several regression and binary classification algorithms are + available in scikit-learn. A simple way to extend these algorithms + to the multi-class classification case is to use the so-called + one-vs-all scheme. + + This function makes it possible to compute this transformation for a + fixed set of class labels known ahead of time. + + Parameters + ---------- + y : array-like + Sequence of integer labels or multilabel data to encode. + + classes : array-like of shape (n_classes,) + Uniquely holds the label for each class. + + neg_label : int, default=0 + Value with which negative labels must be encoded. + + pos_label : int, default=1 + Value with which positive labels must be encoded. + + sparse_output : bool, default=False, + Set to true if output binary array is desired in CSR sparse format. + + Returns + ------- + Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) + Shape will be (n_samples, 1) for binary problems. Sparse matrix will + be of CSR format. + + See Also + -------- + LabelBinarizer : Class used to wrap the functionality of label_binarize and + allow for fitting to classes independently of the transform operation. 
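# --- Illustrative sketch (editorial, not part of the patch): as described in
# the inverse_transform docstring above, continuous scores (e.g. the output of
# decision_function) can be inverted directly; for a multiclass target the
# column with the highest score wins.  The score values are made up.
import numpy as np
from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer().fit([1, 2, 6, 4, 2])   # classes_ == [1, 2, 4, 6]
scores = np.array([[-1.0, 0.2, 3.1, 0.5],
                   [ 2.4, 0.1, -0.3, 0.0]])
print(lb.inverse_transform(scores))          # [4 1]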
+ + Examples + -------- + >>> from sklearn.preprocessing import label_binarize + >>> label_binarize([1, 6], classes=[1, 2, 4, 6]) + array([[1, 0, 0, 0], + [0, 0, 0, 1]]) + + The class ordering is preserved: + + >>> label_binarize([1, 6], classes=[1, 6, 4, 2]) + array([[1, 0, 0, 0], + [0, 1, 0, 0]]) + + Binary targets transform to a column vector + + >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes']) + array([[1], + [0], + [0], + [1]]) + """ + if not isinstance(y, list): + # XXX Workaround that will be removed when list of list format is + # dropped + y = check_array( + y, input_name="y", accept_sparse="csr", ensure_2d=False, dtype=None + ) + else: + if _num_samples(y) == 0: + raise ValueError("y has 0 samples: %r" % y) + if neg_label >= pos_label: + raise ValueError( + "neg_label={0} must be strictly less than pos_label={1}.".format( + neg_label, pos_label + ) + ) + + if sparse_output and (pos_label == 0 or neg_label != 0): + raise ValueError( + "Sparse binarization is only supported with non " + "zero pos_label and zero neg_label, got " + "pos_label={0} and neg_label={1}" + "".format(pos_label, neg_label) + ) + + # To account for pos_label == 0 in the dense case + pos_switch = pos_label == 0 + if pos_switch: + pos_label = -neg_label + + y_type = type_of_target(y) + if "multioutput" in y_type: + raise ValueError( + "Multioutput target data is not supported with label binarization" + ) + if y_type == "unknown": + raise ValueError("The type of target data is not known") + + n_samples = y.shape[0] if sp.issparse(y) else len(y) + n_classes = len(classes) + classes = np.asarray(classes) + + if y_type == "binary": + if n_classes == 1: + if sparse_output: + return sp.csr_matrix((n_samples, 1), dtype=int) + else: + Y = np.zeros((len(y), 1), dtype=int) + Y += neg_label + return Y + elif len(classes) >= 3: + y_type = "multiclass" + + sorted_class = np.sort(classes) + if y_type == "multilabel-indicator": + y_n_classes = y.shape[1] if hasattr(y, "shape") else len(y[0]) + if classes.size != y_n_classes: + raise ValueError( + "classes {0} mismatch with the labels {1} found in the data".format( + classes, unique_labels(y) + ) + ) + + if y_type in ("binary", "multiclass"): + y = column_or_1d(y) + + # pick out the known labels from y + y_in_classes = np.isin(y, classes) + y_seen = y[y_in_classes] + indices = np.searchsorted(sorted_class, y_seen) + indptr = np.hstack((0, np.cumsum(y_in_classes))) + + data = np.empty_like(indices) + data.fill(pos_label) + Y = sp.csr_matrix((data, indices, indptr), shape=(n_samples, n_classes)) + elif y_type == "multilabel-indicator": + Y = sp.csr_matrix(y) + if pos_label != 1: + data = np.empty_like(Y.data) + data.fill(pos_label) + Y.data = data + else: + raise ValueError( + "%s target data is not supported with label binarization" % y_type + ) + + if not sparse_output: + Y = Y.toarray() + Y = Y.astype(int, copy=False) + + if neg_label != 0: + Y[Y == 0] = neg_label + + if pos_switch: + Y[Y == pos_label] = 0 + else: + Y.data = Y.data.astype(int, copy=False) + + # preserve label ordering + if np.any(classes != sorted_class): + indices = np.searchsorted(sorted_class, classes) + Y = Y[:, indices] + + if y_type == "binary": + if sparse_output: + Y = Y.getcol(-1) + else: + Y = Y[:, -1].reshape((-1, 1)) + + return Y + + +def _inverse_binarize_multiclass(y, classes): + """Inverse label binarization transformation for multiclass. + + Multiclass uses the maximal score instead of a threshold. 
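# --- Illustrative sketch (editorial, not part of the patch): label_binarize
# with sparse_output=True returns the indicator matrix in CSR format, which
# keeps memory low when the number of classes is large.
from sklearn.preprocessing import label_binarize

Y = label_binarize([2, 6, 4, 2], classes=[1, 2, 4, 6], sparse_output=True)
print(Y.format, Y.shape)   # csr (4, 4)
print(Y.toarray())
# [[0 1 0 0]
#  [0 0 0 1]
#  [0 0 1 0]
#  [0 1 0 0]]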
+ """ + classes = np.asarray(classes) + + if sp.issparse(y): + # Find the argmax for each row in y where y is a CSR matrix + + y = y.tocsr() + n_samples, n_outputs = y.shape + outputs = np.arange(n_outputs) + row_max = min_max_axis(y, 1)[1] + row_nnz = np.diff(y.indptr) + + y_data_repeated_max = np.repeat(row_max, row_nnz) + # picks out all indices obtaining the maximum per row + y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data) + + # For corner case where last row has a max of 0 + if row_max[-1] == 0: + y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)]) + + # Gets the index of the first argmax in each row from y_i_all_argmax + index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1]) + # first argmax of each row + y_ind_ext = np.append(y.indices, [0]) + y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]] + # Handle rows of all 0 + y_i_argmax[np.where(row_nnz == 0)[0]] = 0 + + # Handles rows with max of 0 that contain negative numbers + samples = np.arange(n_samples)[(row_nnz > 0) & (row_max.ravel() == 0)] + for i in samples: + ind = y.indices[y.indptr[i] : y.indptr[i + 1]] + y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0] + + return classes[y_i_argmax] + else: + return classes.take(y.argmax(axis=1), mode="clip") + + +def _inverse_binarize_thresholding(y, output_type, classes, threshold): + """Inverse label binarization transformation using thresholding.""" + + if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2: + raise ValueError("output_type='binary', but y.shape = {0}".format(y.shape)) + + if output_type != "binary" and y.shape[1] != len(classes): + raise ValueError( + "The number of class is not equal to the number of dimension of y." + ) + + classes = np.asarray(classes) + + # Perform thresholding + if sp.issparse(y): + if threshold > 0: + if y.format not in ("csr", "csc"): + y = y.tocsr() + y.data = np.array(y.data > threshold, dtype=int) + y.eliminate_zeros() + else: + y = np.array(y.toarray() > threshold, dtype=int) + else: + y = np.array(y > threshold, dtype=int) + + # Inverse transform data + if output_type == "binary": + if sp.issparse(y): + y = y.toarray() + if y.ndim == 2 and y.shape[1] == 2: + return classes[y[:, 1]] + else: + if len(classes) == 1: + return np.repeat(classes[0], len(y)) + else: + return classes[y.ravel()] + + elif output_type == "multilabel-indicator": + return y + + else: + raise ValueError("{0} format is not supported".format(output_type)) + + +class MultiLabelBinarizer(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None): + """Transform between iterable of iterables and a multilabel format. + + Although a list of sets or tuples is a very intuitive format for multilabel + data, it is unwieldy to process. This transformer converts between this + intuitive format and the supported multilabel format: a (samples x classes) + binary matrix indicating the presence of a class label. + + Parameters + ---------- + classes : array-like of shape (n_classes,), default=None + Indicates an ordering for the class labels. + All entries should be unique (cannot contain duplicate classes). + + sparse_output : bool, default=False + Set to True if output binary array is desired in CSR sparse format. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + A copy of the `classes` parameter when provided. + Otherwise it corresponds to the sorted set of classes found + when fitting. + + See Also + -------- + OneHotEncoder : Encode categorical features using a one-hot aka one-of-K + scheme. 
+ + Examples + -------- + >>> from sklearn.preprocessing import MultiLabelBinarizer + >>> mlb = MultiLabelBinarizer() + >>> mlb.fit_transform([(1, 2), (3,)]) + array([[1, 1, 0], + [0, 0, 1]]) + >>> mlb.classes_ + array([1, 2, 3]) + + >>> mlb.fit_transform([{'sci-fi', 'thriller'}, {'comedy'}]) + array([[0, 1, 1], + [1, 0, 0]]) + >>> list(mlb.classes_) + ['comedy', 'sci-fi', 'thriller'] + + A common mistake is to pass in a list, which leads to the following issue: + + >>> mlb = MultiLabelBinarizer() + >>> mlb.fit(['sci-fi', 'thriller', 'comedy']) + MultiLabelBinarizer() + >>> mlb.classes_ + array(['-', 'c', 'd', 'e', 'f', 'h', 'i', 'l', 'm', 'o', 'r', 's', 't', + 'y'], dtype=object) + + To correct this, the list of labels should be passed in as: + + >>> mlb = MultiLabelBinarizer() + >>> mlb.fit([['sci-fi', 'thriller', 'comedy']]) + MultiLabelBinarizer() + >>> mlb.classes_ + array(['comedy', 'sci-fi', 'thriller'], dtype=object) + """ + + _parameter_constraints: dict = { + "classes": ["array-like", None], + "sparse_output": ["boolean"], + } + + def __init__(self, *, classes=None, sparse_output=False): + self.classes = classes + self.sparse_output = sparse_output + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, y): + """Fit the label sets binarizer, storing :term:`classes_`. + + Parameters + ---------- + y : iterable of iterables + A set of labels (any orderable and hashable object) for each + sample. If the `classes` parameter is set, `y` will not be + iterated. + + Returns + ------- + self : object + Fitted estimator. + """ + self._cached_dict = None + + if self.classes is None: + classes = sorted(set(itertools.chain.from_iterable(y))) + elif len(set(self.classes)) < len(self.classes): + raise ValueError( + "The classes argument contains duplicate " + "classes. Remove these duplicates before passing " + "them to MultiLabelBinarizer." + ) + else: + classes = self.classes + dtype = int if all(isinstance(c, int) for c in classes) else object + self.classes_ = np.empty(len(classes), dtype=dtype) + self.classes_[:] = classes + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, y): + """Fit the label sets binarizer and transform the given label sets. + + Parameters + ---------- + y : iterable of iterables + A set of labels (any orderable and hashable object) for each + sample. If the `classes` parameter is set, `y` will not be + iterated. + + Returns + ------- + y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes) + A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` + is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR + format. + """ + if self.classes is not None: + return self.fit(y).transform(y) + + self._cached_dict = None + + # Automatically increment on new class + class_mapping = defaultdict(int) + class_mapping.default_factory = class_mapping.__len__ + yt = self._transform(y, class_mapping) + + # sort classes and reorder columns + tmp = sorted(class_mapping, key=class_mapping.get) + + # (make safe for tuples) + dtype = int if all(isinstance(c, int) for c in tmp) else object + class_mapping = np.empty(len(tmp), dtype=dtype) + class_mapping[:] = tmp + self.classes_, inverse = np.unique(class_mapping, return_inverse=True) + # ensure yt.indices keeps its current dtype + yt.indices = np.asarray(inverse[yt.indices], dtype=yt.indices.dtype) + + if not self.sparse_output: + yt = yt.toarray() + + return yt + + def transform(self, y): + """Transform the given label sets. 
+ + Parameters + ---------- + y : iterable of iterables + A set of labels (any orderable and hashable object) for each + sample. If the `classes` parameter is set, `y` will not be + iterated. + + Returns + ------- + y_indicator : array or CSR matrix, shape (n_samples, n_classes) + A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in + `y[i]`, and 0 otherwise. + """ + check_is_fitted(self) + + class_to_index = self._build_cache() + yt = self._transform(y, class_to_index) + + if not self.sparse_output: + yt = yt.toarray() + + return yt + + def _build_cache(self): + if self._cached_dict is None: + self._cached_dict = dict(zip(self.classes_, range(len(self.classes_)))) + + return self._cached_dict + + def _transform(self, y, class_mapping): + """Transforms the label sets with a given mapping. + + Parameters + ---------- + y : iterable of iterables + A set of labels (any orderable and hashable object) for each + sample. If the `classes` parameter is set, `y` will not be + iterated. + + class_mapping : Mapping + Maps from label to column index in label indicator matrix. + + Returns + ------- + y_indicator : sparse matrix of shape (n_samples, n_classes) + Label indicator matrix. Will be of CSR format. + """ + indices = array.array("i") + indptr = array.array("i", [0]) + unknown = set() + for labels in y: + index = set() + for label in labels: + try: + index.add(class_mapping[label]) + except KeyError: + unknown.add(label) + indices.extend(index) + indptr.append(len(indices)) + if unknown: + warnings.warn( + "unknown class(es) {0} will be ignored".format(sorted(unknown, key=str)) + ) + data = np.ones(len(indices), dtype=int) + + return sp.csr_matrix( + (data, indices, indptr), shape=(len(indptr) - 1, len(class_mapping)) + ) + + def inverse_transform(self, yt): + """Transform the given indicator matrix into label sets. + + Parameters + ---------- + yt : {ndarray, sparse matrix} of shape (n_samples, n_classes) + A matrix containing only 1s ands 0s. + + Returns + ------- + y : list of tuples + The set of labels for each sample such that `y[i]` consists of + `classes_[j]` for each `yt[i, j] == 1`. + """ + check_is_fitted(self) + + if yt.shape[1] != len(self.classes_): + raise ValueError( + "Expected indicator for {0} classes, but got {1}".format( + len(self.classes_), yt.shape[1] + ) + ) + + if sp.issparse(yt): + yt = yt.tocsr() + if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0: + raise ValueError("Expected only 0s and 1s in label indicator.") + return [ + tuple(self.classes_.take(yt.indices[start:end])) + for start, end in zip(yt.indptr[:-1], yt.indptr[1:]) + ] + else: + unexpected = np.setdiff1d(yt, [0, 1]) + if len(unexpected) > 0: + raise ValueError( + "Expected only 0s and 1s in label indicator. Also got {0}".format( + unexpected + ) + ) + return [tuple(self.classes_.compress(indicators)) for indicators in yt] + + def _more_tags(self): + return {"X_types": ["2dlabels"]} diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_polynomial.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_polynomial.py new file mode 100644 index 0000000000000000000000000000000000000000..2512f411a5a9c20cb3c182b258b54e7e716496e3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_polynomial.py @@ -0,0 +1,1172 @@ +""" +This file contains preprocessing tools based on polynomials. 
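# --- Illustrative sketch (editorial, not part of the patch): the
# MultiLabelBinarizer behaviour implemented above -- labels unseen at fit time
# are dropped with a warning, and inverse_transform recovers the label sets
# from the indicator matrix.  The genre labels are made up.
import warnings
from sklearn.preprocessing import MultiLabelBinarizer

mlb = MultiLabelBinarizer().fit([{"comedy", "drama"}, {"sci-fi"}])
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    yt = mlb.transform([{"drama", "western"}])   # "western" was never seen
print(yt)                                        # [[0 1 0]]
print(caught[0].message)                         # unknown class(es) ['western'] will be ignored
print(mlb.inverse_transform(yt))                 # [('drama',)]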
+""" +import collections +from itertools import chain, combinations +from itertools import combinations_with_replacement as combinations_w_r +from numbers import Integral + +import numpy as np +from scipy import sparse +from scipy.interpolate import BSpline +from scipy.special import comb + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils import check_array +from ..utils._param_validation import Interval, StrOptions +from ..utils.fixes import parse_version, sp_version +from ..utils.stats import _weighted_percentile +from ..utils.validation import ( + FLOAT_DTYPES, + _check_feature_names_in, + _check_sample_weight, + check_is_fitted, +) +from ._csr_polynomial_expansion import ( + _calc_expanded_nnz, + _calc_total_nnz, + _csr_polynomial_expansion, +) + +__all__ = [ + "PolynomialFeatures", + "SplineTransformer", +] + + +def _create_expansion(X, interaction_only, deg, n_features, cumulative_size=0): + """Helper function for creating and appending sparse expansion matrices""" + + total_nnz = _calc_total_nnz(X.indptr, interaction_only, deg) + expanded_col = _calc_expanded_nnz(n_features, interaction_only, deg) + + if expanded_col == 0: + return None + # This only checks whether each block needs 64bit integers upon + # expansion. We prefer to keep int32 indexing where we can, + # since currently SciPy's CSR construction downcasts when possible, + # so we prefer to avoid an unnecessary cast. The dtype may still + # change in the concatenation process if needed. + # See: https://github.com/scipy/scipy/issues/16569 + max_indices = expanded_col - 1 + max_indptr = total_nnz + max_int32 = np.iinfo(np.int32).max + needs_int64 = max(max_indices, max_indptr) > max_int32 + index_dtype = np.int64 if needs_int64 else np.int32 + + # This is a pretty specific bug that is hard to work around by a user, + # hence we do not detail the entire bug and all possible avoidance + # mechnasisms. Instead we recommend upgrading scipy or shrinking their data. + cumulative_size += expanded_col + if ( + sp_version < parse_version("1.8.0") + and cumulative_size - 1 > max_int32 + and not needs_int64 + ): + raise ValueError( + "In scipy versions `<1.8.0`, the function `scipy.sparse.hstack`" + " sometimes produces negative columns when the output shape contains" + " `n_cols` too large to be represented by a 32bit signed" + " integer. To avoid this error, either use a version" + " of scipy `>=1.8.0` or alter the `PolynomialFeatures`" + " transformer to produce fewer than 2^31 output features." + ) + + # Result of the expansion, modified in place by the + # `_csr_polynomial_expansion` routine. + expanded_data = np.empty(shape=total_nnz, dtype=X.data.dtype) + expanded_indices = np.empty(shape=total_nnz, dtype=index_dtype) + expanded_indptr = np.empty(shape=X.indptr.shape[0], dtype=index_dtype) + _csr_polynomial_expansion( + X.data, + X.indices, + X.indptr, + X.shape[1], + expanded_data, + expanded_indices, + expanded_indptr, + interaction_only, + deg, + ) + return sparse.csr_matrix( + (expanded_data, expanded_indices, expanded_indptr), + shape=(X.indptr.shape[0] - 1, expanded_col), + dtype=X.dtype, + ) + + +class PolynomialFeatures(TransformerMixin, BaseEstimator): + """Generate polynomial and interaction features. + + Generate a new feature matrix consisting of all polynomial combinations + of the features with degree less than or equal to the specified degree. + For example, if an input sample is two dimensional and of the form + [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2]. 
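# --- Illustrative sketch (editorial, not part of the patch): a numeric check
# of the ordering described above, [1, a, b, a^2, ab, b^2], for a single
# sample [a, b] = [2, 3].
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

X = np.array([[2.0, 3.0]])
print(PolynomialFeatures(degree=2).fit_transform(X))
# [[1. 2. 3. 4. 6. 9.]]   i.e. [1, a, b, a**2, a*b, b**2]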
+ + Read more in the :ref:`User Guide `. + + Parameters + ---------- + degree : int or tuple (min_degree, max_degree), default=2 + If a single int is given, it specifies the maximal degree of the + polynomial features. If a tuple `(min_degree, max_degree)` is passed, + then `min_degree` is the minimum and `max_degree` is the maximum + polynomial degree of the generated features. Note that `min_degree=0` + and `min_degree=1` are equivalent as outputting the degree zero term is + determined by `include_bias`. + + interaction_only : bool, default=False + If `True`, only interaction features are produced: features that are + products of at most `degree` *distinct* input features, i.e. terms with + power of 2 or higher of the same input feature are excluded: + + - included: `x[0]`, `x[1]`, `x[0] * x[1]`, etc. + - excluded: `x[0] ** 2`, `x[0] ** 2 * x[1]`, etc. + + include_bias : bool, default=True + If `True` (default), then include a bias column, the feature in which + all polynomial powers are zero (i.e. a column of ones - acts as an + intercept term in a linear model). + + order : {'C', 'F'}, default='C' + Order of output array in the dense case. `'F'` order is faster to + compute, but may slow down subsequent estimators. + + .. versionadded:: 0.21 + + Attributes + ---------- + powers_ : ndarray of shape (`n_output_features_`, `n_features_in_`) + `powers_[i, j]` is the exponent of the jth input in the ith output. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_output_features_ : int + The total number of polynomial output features. The number of output + features is computed by iterating over all suitably sized combinations + of input features. + + See Also + -------- + SplineTransformer : Transformer that generates univariate B-spline bases + for features. + + Notes + ----- + Be aware that the number of features in the output array scales + polynomially in the number of features of the input array, and + exponentially in the degree. High degrees can cause overfitting. 
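# --- Illustrative sketch (editorial, not part of the patch): the
# degree=(min_degree, max_degree), interaction_only and include_bias knobs
# documented above, on the same two-feature sample [a, b] = [2, 3].
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

X = np.array([[2.0, 3.0]])

# Keep only the degree-2 terms, no bias column.
print(PolynomialFeatures(degree=(2, 2), include_bias=False).fit_transform(X))
# [[4. 6. 9.]]   -> a**2, a*b, b**2

# interaction_only=True drops the pure powers a**2 and b**2.
print(PolynomialFeatures(degree=2, interaction_only=True,
                         include_bias=False).fit_transform(X))
# [[2. 3. 6.]]   -> a, b, a*b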
+ + See :ref:`examples/linear_model/plot_polynomial_interpolation.py + ` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.preprocessing import PolynomialFeatures + >>> X = np.arange(6).reshape(3, 2) + >>> X + array([[0, 1], + [2, 3], + [4, 5]]) + >>> poly = PolynomialFeatures(2) + >>> poly.fit_transform(X) + array([[ 1., 0., 1., 0., 0., 1.], + [ 1., 2., 3., 4., 6., 9.], + [ 1., 4., 5., 16., 20., 25.]]) + >>> poly = PolynomialFeatures(interaction_only=True) + >>> poly.fit_transform(X) + array([[ 1., 0., 1., 0.], + [ 1., 2., 3., 6.], + [ 1., 4., 5., 20.]]) + """ + + _parameter_constraints: dict = { + "degree": [Interval(Integral, 0, None, closed="left"), "array-like"], + "interaction_only": ["boolean"], + "include_bias": ["boolean"], + "order": [StrOptions({"C", "F"})], + } + + def __init__( + self, degree=2, *, interaction_only=False, include_bias=True, order="C" + ): + self.degree = degree + self.interaction_only = interaction_only + self.include_bias = include_bias + self.order = order + + @staticmethod + def _combinations( + n_features, min_degree, max_degree, interaction_only, include_bias + ): + comb = combinations if interaction_only else combinations_w_r + start = max(1, min_degree) + iter = chain.from_iterable( + comb(range(n_features), i) for i in range(start, max_degree + 1) + ) + if include_bias: + iter = chain(comb(range(n_features), 0), iter) + return iter + + @staticmethod + def _num_combinations( + n_features, min_degree, max_degree, interaction_only, include_bias + ): + """Calculate number of terms in polynomial expansion + + This should be equivalent to counting the number of terms returned by + _combinations(...) but much faster. + """ + + if interaction_only: + combinations = sum( + [ + comb(n_features, i, exact=True) + for i in range(max(1, min_degree), min(max_degree, n_features) + 1) + ] + ) + else: + combinations = comb(n_features + max_degree, max_degree, exact=True) - 1 + if min_degree > 0: + d = min_degree - 1 + combinations -= comb(n_features + d, d, exact=True) - 1 + + if include_bias: + combinations += 1 + + return combinations + + @property + def powers_(self): + """Exponent for each of the inputs in the output.""" + check_is_fitted(self) + + combinations = self._combinations( + n_features=self.n_features_in_, + min_degree=self._min_degree, + max_degree=self._max_degree, + interaction_only=self.interaction_only, + include_bias=self.include_bias, + ) + return np.vstack( + [np.bincount(c, minlength=self.n_features_in_) for c in combinations] + ) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features is None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. 
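# --- Illustrative sketch (editorial, not part of the patch): powers_[i, j] is
# the exponent of input j in output i, and get_feature_names_out renders those
# exponents; the feature names "a" and "b" are supplied by the caller here.
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

poly = PolynomialFeatures(degree=2).fit(np.zeros((1, 2)))
print(poly.powers_)
# [[0 0]
#  [1 0]
#  [0 1]
#  [2 0]
#  [1 1]
#  [0 2]]
print(list(poly.get_feature_names_out(["a", "b"])))
# ['1', 'a', 'b', 'a^2', 'a b', 'b^2']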
+ """ + powers = self.powers_ + input_features = _check_feature_names_in(self, input_features) + feature_names = [] + for row in powers: + inds = np.where(row)[0] + if len(inds): + name = " ".join( + ( + "%s^%d" % (input_features[ind], exp) + if exp != 1 + else input_features[ind] + ) + for ind, exp in zip(inds, row[inds]) + ) + else: + name = "1" + feature_names.append(name) + return np.asarray(feature_names, dtype=object) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """ + Compute number of output features. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + Fitted transformer. + """ + _, n_features = self._validate_data(X, accept_sparse=True).shape + + if isinstance(self.degree, Integral): + if self.degree == 0 and not self.include_bias: + raise ValueError( + "Setting degree to zero and include_bias to False would result in" + " an empty output array." + ) + + self._min_degree = 0 + self._max_degree = self.degree + elif ( + isinstance(self.degree, collections.abc.Iterable) and len(self.degree) == 2 + ): + self._min_degree, self._max_degree = self.degree + if not ( + isinstance(self._min_degree, Integral) + and isinstance(self._max_degree, Integral) + and self._min_degree >= 0 + and self._min_degree <= self._max_degree + ): + raise ValueError( + "degree=(min_degree, max_degree) must " + "be non-negative integers that fulfil " + "min_degree <= max_degree, got " + f"{self.degree}." + ) + elif self._max_degree == 0 and not self.include_bias: + raise ValueError( + "Setting both min_degree and max_degree to zero and include_bias to" + " False would result in an empty output array." + ) + else: + raise ValueError( + "degree must be a non-negative int or tuple " + "(min_degree, max_degree), got " + f"{self.degree}." + ) + + self.n_output_features_ = self._num_combinations( + n_features=n_features, + min_degree=self._min_degree, + max_degree=self._max_degree, + interaction_only=self.interaction_only, + include_bias=self.include_bias, + ) + if self.n_output_features_ > np.iinfo(np.intp).max: + msg = ( + "The output that would result from the current configuration would" + f" have {self.n_output_features_} features which is too large to be" + f" indexed by {np.intp().dtype.name}. Please change some or all of the" + " following:\n- The number of features in the input, currently" + f" {n_features=}\n- The range of degrees to calculate, currently" + f" [{self._min_degree}, {self._max_degree}]\n- Whether to include only" + f" interaction terms, currently {self.interaction_only}\n- Whether to" + f" include a bias term, currently {self.include_bias}." + ) + if ( + np.intp == np.int32 + and self.n_output_features_ <= np.iinfo(np.int64).max + ): # pragma: nocover + msg += ( + "\nNote that the current Python runtime has a limited 32 bit " + "address space and that this configuration would have been " + "admissible if run on a 64 bit Python runtime." + ) + raise ValueError(msg) + # We also record the number of output features for + # _max_degree = 0 + self._n_out_full = self._num_combinations( + n_features=n_features, + min_degree=0, + max_degree=self._max_degree, + interaction_only=self.interaction_only, + include_bias=self.include_bias, + ) + + return self + + def transform(self, X): + """Transform data to polynomial features. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to transform, row by row. + + Prefer CSR over CSC for sparse input (for speed), but CSC is + required if the degree is 4 or higher. If the degree is less than + 4 and the input format is CSC, it will be converted to CSR, have + its polynomial features generated, then converted back to CSC. + + If the degree is 2 or 3, the method described in "Leveraging + Sparsity to Speed Up Polynomial Feature Expansions of CSR Matrices + Using K-Simplex Numbers" by Andrew Nystrom and John Hughes is + used, which is much faster than the method used on CSC input. For + this reason, a CSC input will be converted to CSR, and the output + will be converted back to CSC prior to being returned, hence the + preference of CSR. + + Returns + ------- + XP : {ndarray, sparse matrix} of shape (n_samples, NP) + The matrix of features, where `NP` is the number of polynomial + features generated from the combination of inputs. If a sparse + matrix is provided, it will be converted into a sparse + `csr_matrix`. + """ + check_is_fitted(self) + + X = self._validate_data( + X, order="F", dtype=FLOAT_DTYPES, reset=False, accept_sparse=("csr", "csc") + ) + + n_samples, n_features = X.shape + max_int32 = np.iinfo(np.int32).max + if sparse.issparse(X) and X.format == "csr": + if self._max_degree > 3: + return self.transform(X.tocsc()).tocsr() + to_stack = [] + if self.include_bias: + to_stack.append( + sparse.csr_matrix(np.ones(shape=(n_samples, 1), dtype=X.dtype)) + ) + if self._min_degree <= 1 and self._max_degree > 0: + to_stack.append(X) + + cumulative_size = sum(mat.shape[1] for mat in to_stack) + for deg in range(max(2, self._min_degree), self._max_degree + 1): + expanded = _create_expansion( + X=X, + interaction_only=self.interaction_only, + deg=deg, + n_features=n_features, + cumulative_size=cumulative_size, + ) + if expanded is not None: + to_stack.append(expanded) + cumulative_size += expanded.shape[1] + if len(to_stack) == 0: + # edge case: deal with empty matrix + XP = sparse.csr_matrix((n_samples, 0), dtype=X.dtype) + else: + # `scipy.sparse.hstack` breaks in scipy<1.9.2 + # when `n_output_features_ > max_int32` + all_int32 = all(mat.indices.dtype == np.int32 for mat in to_stack) + if ( + sp_version < parse_version("1.9.2") + and self.n_output_features_ > max_int32 + and all_int32 + ): + raise ValueError( # pragma: no cover + "In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`" + " produces negative columns when:\n1. The output shape contains" + " `n_cols` too large to be represented by a 32bit signed" + " integer.\n2. 
All sub-matrices to be stacked have indices of" + " dtype `np.int32`.\nTo avoid this error, either use a version" + " of scipy `>=1.9.2` or alter the `PolynomialFeatures`" + " transformer to produce fewer than 2^31 output features" + ) + XP = sparse.hstack(to_stack, dtype=X.dtype, format="csr") + elif sparse.issparse(X) and X.format == "csc" and self._max_degree < 4: + return self.transform(X.tocsr()).tocsc() + elif sparse.issparse(X): + combinations = self._combinations( + n_features=n_features, + min_degree=self._min_degree, + max_degree=self._max_degree, + interaction_only=self.interaction_only, + include_bias=self.include_bias, + ) + columns = [] + for combi in combinations: + if combi: + out_col = 1 + for col_idx in combi: + out_col = X[:, [col_idx]].multiply(out_col) + columns.append(out_col) + else: + bias = sparse.csc_matrix(np.ones((X.shape[0], 1))) + columns.append(bias) + XP = sparse.hstack(columns, dtype=X.dtype).tocsc() + else: + # Do as if _min_degree = 0 and cut down array after the + # computation, i.e. use _n_out_full instead of n_output_features_. + XP = np.empty( + shape=(n_samples, self._n_out_full), dtype=X.dtype, order=self.order + ) + + # What follows is a faster implementation of: + # for i, comb in enumerate(combinations): + # XP[:, i] = X[:, comb].prod(1) + # This implementation uses two optimisations. + # First one is broadcasting, + # multiply ([X1, ..., Xn], X1) -> [X1 X1, ..., Xn X1] + # multiply ([X2, ..., Xn], X2) -> [X2 X2, ..., Xn X2] + # ... + # multiply ([X[:, start:end], X[:, start]) -> ... + # Second optimisation happens for degrees >= 3. + # Xi^3 is computed reusing previous computation: + # Xi^3 = Xi^2 * Xi. + + # degree 0 term + if self.include_bias: + XP[:, 0] = 1 + current_col = 1 + else: + current_col = 0 + + if self._max_degree == 0: + return XP + + # degree 1 term + XP[:, current_col : current_col + n_features] = X + index = list(range(current_col, current_col + n_features)) + current_col += n_features + index.append(current_col) + + # loop over degree >= 2 terms + for _ in range(2, self._max_degree + 1): + new_index = [] + end = index[-1] + for feature_idx in range(n_features): + start = index[feature_idx] + new_index.append(current_col) + if self.interaction_only: + start += index[feature_idx + 1] - index[feature_idx] + next_col = current_col + end - start + if next_col <= current_col: + break + # XP[:, start:end] are terms of degree d - 1 + # that exclude feature #feature_idx. + np.multiply( + XP[:, start:end], + X[:, feature_idx : feature_idx + 1], + out=XP[:, current_col:next_col], + casting="no", + ) + current_col = next_col + + new_index.append(current_col) + index = new_index + + if self._min_degree > 1: + n_XP, n_Xout = self._n_out_full, self.n_output_features_ + if self.include_bias: + Xout = np.empty( + shape=(n_samples, n_Xout), dtype=XP.dtype, order=self.order + ) + Xout[:, 0] = 1 + Xout[:, 1:] = XP[:, n_XP - n_Xout + 1 :] + else: + Xout = XP[:, n_XP - n_Xout :].copy() + XP = Xout + return XP + + +class SplineTransformer(TransformerMixin, BaseEstimator): + """Generate univariate B-spline bases for features. + + Generate a new feature matrix consisting of + `n_splines=n_knots + degree - 1` (`n_knots - 1` for + `extrapolation="periodic"`) spline basis functions + (B-splines) of polynomial order=`degree` for each feature. + + In order to learn more about the SplineTransformer class go to: + :ref:`sphx_glr_auto_examples_applications_plot_cyclical_feature_engineering.py` + + Read more in the :ref:`User Guide `. + + .. 
versionadded:: 1.0 + + Parameters + ---------- + n_knots : int, default=5 + Number of knots of the splines if `knots` equals one of + {'uniform', 'quantile'}. Must be larger or equal 2. Ignored if `knots` + is array-like. + + degree : int, default=3 + The polynomial degree of the spline basis. Must be a non-negative + integer. + + knots : {'uniform', 'quantile'} or array-like of shape \ + (n_knots, n_features), default='uniform' + Set knot positions such that first knot <= features <= last knot. + + - If 'uniform', `n_knots` number of knots are distributed uniformly + from min to max values of the features. + - If 'quantile', they are distributed uniformly along the quantiles of + the features. + - If an array-like is given, it directly specifies the sorted knot + positions including the boundary knots. Note that, internally, + `degree` number of knots are added before the first knot, the same + after the last knot. + + extrapolation : {'error', 'constant', 'linear', 'continue', 'periodic'}, \ + default='constant' + If 'error', values outside the min and max values of the training + features raises a `ValueError`. If 'constant', the value of the + splines at minimum and maximum value of the features is used as + constant extrapolation. If 'linear', a linear extrapolation is used. + If 'continue', the splines are extrapolated as is, i.e. option + `extrapolate=True` in :class:`scipy.interpolate.BSpline`. If + 'periodic', periodic splines with a periodicity equal to the distance + between the first and last knot are used. Periodic splines enforce + equal function values and derivatives at the first and last knot. + For example, this makes it possible to avoid introducing an arbitrary + jump between Dec 31st and Jan 1st in spline features derived from a + naturally periodic "day-of-year" input feature. In this case it is + recommended to manually set the knot values to control the period. + + include_bias : bool, default=True + If False, then the last spline element inside the data range + of a feature is dropped. As B-splines sum to one over the spline basis + functions for each data point, they implicitly include a bias term, + i.e. a column of ones. It acts as an intercept term in a linear models. + + order : {'C', 'F'}, default='C' + Order of output array in the dense case. `'F'` order is faster to compute, but + may slow down subsequent estimators. + + sparse_output : bool, default=False + Will return sparse CSR matrix if set True else will return an array. This + option is only available with `scipy>=1.8`. + + .. versionadded:: 1.2 + + Attributes + ---------- + bsplines_ : list of shape (n_features,) + List of BSplines objects, one for each feature. + + n_features_in_ : int + The total number of input features. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_features_out_ : int + The total number of output features, which is computed as + `n_features * n_splines`, where `n_splines` is + the number of bases elements of the B-splines, + `n_knots + degree - 1` for non-periodic splines and + `n_knots - 1` for periodic ones. + If `include_bias=False`, then it is only + `n_features * (n_splines - 1)`. + + See Also + -------- + KBinsDiscretizer : Transformer that bins continuous data into intervals. + + PolynomialFeatures : Transformer that generates polynomial and interaction + features. 
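# --- Illustrative sketch (editorial, not part of the patch): periodic splines
# for a "day-of-year" style feature, with the knots set manually so the period
# is exactly 365 days, as the extrapolation='periodic' description above
# recommends.  The 12-segment knot grid is an arbitrary choice for the example.
import numpy as np
from sklearn.preprocessing import SplineTransformer

day_of_year = np.arange(0, 365, 5).reshape(-1, 1)
spline = SplineTransformer(
    degree=3,
    knots=np.linspace(0, 365, 13).reshape(-1, 1),  # 13 knots -> 12 periodic splines
    extrapolation="periodic",
    include_bias=False,
)
XS = spline.fit_transform(day_of_year)
print(XS.shape)   # (73, 11): n_knots - 1 = 12 splines, minus 1 for include_bias=False
# Day 0 and day 365 get numerically identical rows thanks to periodicity.
print(np.allclose(spline.transform([[0.0]]), spline.transform([[365.0]])))  # True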
+ + Notes + ----- + High degrees and a high number of knots can cause overfitting. + + See :ref:`examples/linear_model/plot_polynomial_interpolation.py + `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.preprocessing import SplineTransformer + >>> X = np.arange(6).reshape(6, 1) + >>> spline = SplineTransformer(degree=2, n_knots=3) + >>> spline.fit_transform(X) + array([[0.5 , 0.5 , 0. , 0. ], + [0.18, 0.74, 0.08, 0. ], + [0.02, 0.66, 0.32, 0. ], + [0. , 0.32, 0.66, 0.02], + [0. , 0.08, 0.74, 0.18], + [0. , 0. , 0.5 , 0.5 ]]) + """ + + _parameter_constraints: dict = { + "n_knots": [Interval(Integral, 2, None, closed="left")], + "degree": [Interval(Integral, 0, None, closed="left")], + "knots": [StrOptions({"uniform", "quantile"}), "array-like"], + "extrapolation": [ + StrOptions({"error", "constant", "linear", "continue", "periodic"}) + ], + "include_bias": ["boolean"], + "order": [StrOptions({"C", "F"})], + "sparse_output": ["boolean"], + } + + def __init__( + self, + n_knots=5, + degree=3, + *, + knots="uniform", + extrapolation="constant", + include_bias=True, + order="C", + sparse_output=False, + ): + self.n_knots = n_knots + self.degree = degree + self.knots = knots + self.extrapolation = extrapolation + self.include_bias = include_bias + self.order = order + self.sparse_output = sparse_output + + @staticmethod + def _get_base_knot_positions(X, n_knots=10, knots="uniform", sample_weight=None): + """Calculate base knot positions. + + Base knots such that first knot <= feature <= last knot. For the + B-spline construction with scipy.interpolate.BSpline, 2*degree knots + beyond the base interval are added. + + Returns + ------- + knots : ndarray of shape (n_knots, n_features), dtype=np.float64 + Knot positions (points) of base interval. + """ + if knots == "quantile": + percentiles = 100 * np.linspace( + start=0, stop=1, num=n_knots, dtype=np.float64 + ) + + if sample_weight is None: + knots = np.percentile(X, percentiles, axis=0) + else: + knots = np.array( + [ + _weighted_percentile(X, sample_weight, percentile) + for percentile in percentiles + ] + ) + + else: + # knots == 'uniform': + # Note that the variable `knots` has already been validated and + # `else` is therefore safe. + # Disregard observations with zero weight. + mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0 + x_min = np.amin(X[mask], axis=0) + x_max = np.amax(X[mask], axis=0) + + knots = np.linspace( + start=x_min, + stop=x_max, + num=n_knots, + endpoint=True, + dtype=np.float64, + ) + + return knots + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. 
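# --- Illustrative sketch (editorial, not part of the patch): the generated
# names follow the "<feature>_sp_<index>" pattern used by get_feature_names_out
# above; the input name "age" is made up.
import numpy as np
from sklearn.preprocessing import SplineTransformer

spline = SplineTransformer(degree=2, n_knots=3).fit(np.arange(6).reshape(6, 1))
print(list(spline.get_feature_names_out(["age"])))
# ['age_sp_0', 'age_sp_1', 'age_sp_2', 'age_sp_3']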
+ """ + check_is_fitted(self, "n_features_in_") + n_splines = self.bsplines_[0].c.shape[1] + + input_features = _check_feature_names_in(self, input_features) + feature_names = [] + for i in range(self.n_features_in_): + for j in range(n_splines - 1 + self.include_bias): + feature_names.append(f"{input_features[i]}_sp_{j}") + return np.asarray(feature_names, dtype=object) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None, sample_weight=None): + """Compute knot positions of splines. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data. + + y : None + Ignored. + + sample_weight : array-like of shape (n_samples,), default = None + Individual weights for each sample. Used to calculate quantiles if + `knots="quantile"`. For `knots="uniform"`, zero weighted + observations are ignored for finding the min and max of `X`. + + Returns + ------- + self : object + Fitted transformer. + """ + X = self._validate_data( + X, + reset=True, + accept_sparse=False, + ensure_min_samples=2, + ensure_2d=True, + ) + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + _, n_features = X.shape + + if isinstance(self.knots, str): + base_knots = self._get_base_knot_positions( + X, n_knots=self.n_knots, knots=self.knots, sample_weight=sample_weight + ) + else: + base_knots = check_array(self.knots, dtype=np.float64) + if base_knots.shape[0] < 2: + raise ValueError("Number of knots, knots.shape[0], must be >= 2.") + elif base_knots.shape[1] != n_features: + raise ValueError("knots.shape[1] == n_features is violated.") + elif not np.all(np.diff(base_knots, axis=0) > 0): + raise ValueError("knots must be sorted without duplicates.") + + if self.sparse_output and sp_version < parse_version("1.8.0"): + raise ValueError( + "Option sparse_output=True is only available with scipy>=1.8.0, " + f"but here scipy=={sp_version} is used." + ) + + # number of knots for base interval + n_knots = base_knots.shape[0] + + if self.extrapolation == "periodic" and n_knots <= self.degree: + raise ValueError( + "Periodic splines require degree < n_knots. Got n_knots=" + f"{n_knots} and degree={self.degree}." + ) + + # number of splines basis functions + if self.extrapolation != "periodic": + n_splines = n_knots + self.degree - 1 + else: + # periodic splines have self.degree less degrees of freedom + n_splines = n_knots - 1 + + degree = self.degree + n_out = n_features * n_splines + # We have to add degree number of knots below, and degree number knots + # above the base knots in order to make the spline basis complete. + if self.extrapolation == "periodic": + # For periodic splines the spacing of the first / last degree knots + # needs to be a continuation of the spacing of the last / first + # base knots. + period = base_knots[-1] - base_knots[0] + knots = np.r_[ + base_knots[-(degree + 1) : -1] - period, + base_knots, + base_knots[1 : (degree + 1)] + period, + ] + + else: + # Eilers & Marx in "Flexible smoothing with B-splines and + # penalties" https://doi.org/10.1214/ss/1038425655 advice + # against repeating first and last knot several times, which + # would have inferior behaviour at boundaries if combined with + # a penalty (hence P-Spline). We follow this advice even if our + # splines are unpenalized. 
Meaning we do not: + # knots = np.r_[ + # np.tile(base_knots.min(axis=0), reps=[degree, 1]), + # base_knots, + # np.tile(base_knots.max(axis=0), reps=[degree, 1]) + # ] + # Instead, we reuse the distance of the 2 fist/last knots. + dist_min = base_knots[1] - base_knots[0] + dist_max = base_knots[-1] - base_knots[-2] + + knots = np.r_[ + np.linspace( + base_knots[0] - degree * dist_min, + base_knots[0] - dist_min, + num=degree, + ), + base_knots, + np.linspace( + base_knots[-1] + dist_max, + base_knots[-1] + degree * dist_max, + num=degree, + ), + ] + + # With a diagonal coefficient matrix, we get back the spline basis + # elements, i.e. the design matrix of the spline. + # Note, BSpline appreciates C-contiguous float64 arrays as c=coef. + coef = np.eye(n_splines, dtype=np.float64) + if self.extrapolation == "periodic": + coef = np.concatenate((coef, coef[:degree, :])) + + extrapolate = self.extrapolation in ["periodic", "continue"] + + bsplines = [ + BSpline.construct_fast( + knots[:, i], coef, self.degree, extrapolate=extrapolate + ) + for i in range(n_features) + ] + self.bsplines_ = bsplines + + self.n_features_out_ = n_out - n_features * (1 - self.include_bias) + return self + + def transform(self, X): + """Transform each feature data to B-splines. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to transform. + + Returns + ------- + XBS : {ndarray, sparse matrix} of shape (n_samples, n_features * n_splines) + The matrix of features, where n_splines is the number of bases + elements of the B-splines, n_knots + degree - 1. + """ + check_is_fitted(self) + + X = self._validate_data(X, reset=False, accept_sparse=False, ensure_2d=True) + + n_samples, n_features = X.shape + n_splines = self.bsplines_[0].c.shape[1] + degree = self.degree + + # TODO: Remove this condition, once scipy 1.10 is the minimum version. + # Only scipy => 1.10 supports design_matrix(.., extrapolate=..). + # The default (implicit in scipy < 1.10) is extrapolate=False. + scipy_1_10 = sp_version >= parse_version("1.10.0") + # Note: self.bsplines_[0].extrapolate is True for extrapolation in + # ["periodic", "continue"] + if scipy_1_10: + use_sparse = self.sparse_output + kwargs_extrapolate = {"extrapolate": self.bsplines_[0].extrapolate} + else: + use_sparse = self.sparse_output and not self.bsplines_[0].extrapolate + kwargs_extrapolate = dict() + + # Note that scipy BSpline returns float64 arrays and converts input + # x=X[:, i] to c-contiguous float64. + n_out = self.n_features_out_ + n_features * (1 - self.include_bias) + if X.dtype in FLOAT_DTYPES: + dtype = X.dtype + else: + dtype = np.float64 + if use_sparse: + output_list = [] + else: + XBS = np.zeros((n_samples, n_out), dtype=dtype, order=self.order) + + for i in range(n_features): + spl = self.bsplines_[i] + + if self.extrapolation in ("continue", "error", "periodic"): + if self.extrapolation == "periodic": + # With periodic extrapolation we map x to the segment + # [spl.t[k], spl.t[n]]. + # This is equivalent to BSpline(.., extrapolate="periodic") + # for scipy>=1.0.0. + n = spl.t.size - spl.k - 1 + # Assign to new array to avoid inplace operation + x = spl.t[spl.k] + (X[:, i] - spl.t[spl.k]) % ( + spl.t[n] - spl.t[spl.k] + ) + else: + x = X[:, i] + + if use_sparse: + XBS_sparse = BSpline.design_matrix( + x, spl.t, spl.k, **kwargs_extrapolate + ) + if self.extrapolation == "periodic": + # See the construction of coef in fit. 
We need to add the last + # degree spline basis function to the first degree ones and + # then drop the last ones. + # Note: See comment about SparseEfficiencyWarning below. + XBS_sparse = XBS_sparse.tolil() + XBS_sparse[:, :degree] += XBS_sparse[:, -degree:] + XBS_sparse = XBS_sparse[:, :-degree] + else: + XBS[:, (i * n_splines) : ((i + 1) * n_splines)] = spl(x) + else: # extrapolation in ("constant", "linear") + xmin, xmax = spl.t[degree], spl.t[-degree - 1] + # spline values at boundaries + f_min, f_max = spl(xmin), spl(xmax) + mask = (xmin <= X[:, i]) & (X[:, i] <= xmax) + if use_sparse: + mask_inv = ~mask + x = X[:, i].copy() + # Set some arbitrary values outside boundary that will be reassigned + # later. + x[mask_inv] = spl.t[self.degree] + XBS_sparse = BSpline.design_matrix(x, spl.t, spl.k) + # Note: Without converting to lil_matrix we would get: + # scipy.sparse._base.SparseEfficiencyWarning: Changing the sparsity + # structure of a csr_matrix is expensive. lil_matrix is more + # efficient. + if np.any(mask_inv): + XBS_sparse = XBS_sparse.tolil() + XBS_sparse[mask_inv, :] = 0 + else: + XBS[mask, (i * n_splines) : ((i + 1) * n_splines)] = spl(X[mask, i]) + + # Note for extrapolation: + # 'continue' is already returned as is by scipy BSplines + if self.extrapolation == "error": + # BSpline with extrapolate=False does not raise an error, but + # outputs np.nan. + if (use_sparse and np.any(np.isnan(XBS_sparse.data))) or ( + not use_sparse + and np.any( + np.isnan(XBS[:, (i * n_splines) : ((i + 1) * n_splines)]) + ) + ): + raise ValueError( + "X contains values beyond the limits of the knots." + ) + elif self.extrapolation == "constant": + # Set all values beyond xmin and xmax to the value of the + # spline basis functions at those two positions. + # Only the first degree and last degree number of splines + # have non-zero values at the boundaries. + + mask = X[:, i] < xmin + if np.any(mask): + if use_sparse: + # Note: See comment about SparseEfficiencyWarning above. + XBS_sparse = XBS_sparse.tolil() + XBS_sparse[mask, :degree] = f_min[:degree] + + else: + XBS[mask, (i * n_splines) : (i * n_splines + degree)] = f_min[ + :degree + ] + + mask = X[:, i] > xmax + if np.any(mask): + if use_sparse: + # Note: See comment about SparseEfficiencyWarning above. + XBS_sparse = XBS_sparse.tolil() + XBS_sparse[mask, -degree:] = f_max[-degree:] + else: + XBS[ + mask, + ((i + 1) * n_splines - degree) : ((i + 1) * n_splines), + ] = f_max[-degree:] + + elif self.extrapolation == "linear": + # Continue the degree first and degree last spline bases + # linearly beyond the boundaries, with slope = derivative at + # the boundary. + # Note that all others have derivative = value = 0 at the + # boundaries. + + # spline derivatives = slopes at boundaries + fp_min, fp_max = spl(xmin, nu=1), spl(xmax, nu=1) + # Compute the linear continuation. + if degree <= 1: + # For degree=1, the derivative of 2nd spline is not zero at + # boundary. For degree=0 it is the same as 'constant'. + degree += 1 + for j in range(degree): + mask = X[:, i] < xmin + if np.any(mask): + linear_extr = f_min[j] + (X[mask, i] - xmin) * fp_min[j] + if use_sparse: + # Note: See comment about SparseEfficiencyWarning above. + XBS_sparse = XBS_sparse.tolil() + XBS_sparse[mask, j] = linear_extr + else: + XBS[mask, i * n_splines + j] = linear_extr + + mask = X[:, i] > xmax + if np.any(mask): + k = n_splines - 1 - j + linear_extr = f_max[k] + (X[mask, i] - xmax) * fp_max[k] + if use_sparse: + # Note: See comment about SparseEfficiencyWarning above. 
+ XBS_sparse = XBS_sparse.tolil() + XBS_sparse[mask, k : k + 1] = linear_extr[:, None] + else: + XBS[mask, i * n_splines + k] = linear_extr + + if use_sparse: + XBS_sparse = XBS_sparse.tocsr() + output_list.append(XBS_sparse) + + if use_sparse: + # TODO: Remove this conditional error when the minimum supported version of + # SciPy is 1.9.2 + # `scipy.sparse.hstack` breaks in scipy<1.9.2 + # when `n_features_out_ > max_int32` + max_int32 = np.iinfo(np.int32).max + all_int32 = True + for mat in output_list: + all_int32 &= mat.indices.dtype == np.int32 + if ( + sp_version < parse_version("1.9.2") + and self.n_features_out_ > max_int32 + and all_int32 + ): + raise ValueError( + "In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`" + " produces negative columns when:\n1. The output shape contains" + " `n_cols` too large to be represented by a 32bit signed" + " integer.\n. All sub-matrices to be stacked have indices of" + " dtype `np.int32`.\nTo avoid this error, either use a version" + " of scipy `>=1.9.2` or alter the `SplineTransformer`" + " transformer to produce fewer than 2^31 output features" + ) + XBS = sparse.hstack(output_list, format="csr") + elif self.sparse_output: + # TODO: Remove ones scipy 1.10 is the minimum version. See comments above. + XBS = sparse.csr_matrix(XBS) + + if self.include_bias: + return XBS + else: + # We throw away one spline basis per feature. + # We chose the last one. + indices = [j for j in range(XBS.shape[1]) if (j + 1) % n_splines != 0] + return XBS[:, indices] + + def _more_tags(self): + return { + "_xfail_checks": { + "check_estimators_pickle": ( + "Current Scipy implementation of _bsplines does not" + "support const memory views." + ), + } + } diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_target_encoder.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_target_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..b3b7c3d5e7bd911153d9e9724c05cc673c9f3cfd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_target_encoder.py @@ -0,0 +1,531 @@ +from numbers import Integral, Real + +import numpy as np + +from ..base import OneToOneFeatureMixin, _fit_context +from ..utils._param_validation import Interval, StrOptions +from ..utils.multiclass import type_of_target +from ..utils.validation import ( + _check_feature_names_in, + _check_y, + check_consistent_length, + check_is_fitted, +) +from ._encoders import _BaseEncoder +from ._target_encoder_fast import _fit_encoding_fast, _fit_encoding_fast_auto_smooth + + +class TargetEncoder(OneToOneFeatureMixin, _BaseEncoder): + """Target Encoder for regression and classification targets. + + Each category is encoded based on a shrunk estimate of the average target + values for observations belonging to the category. The encoding scheme mixes + the global target mean with the target mean conditioned on the value of the + category (see [MIC]_). + + When the target type is "multiclass", encodings are based + on the conditional probability estimate for each class. The target is first + binarized using the "one-vs-all" scheme via + :class:`~sklearn.preprocessing.LabelBinarizer`, then the average target + value for each class and each category is used for encoding, resulting in + `n_features` * `n_classes` encoded output features. + + :class:`TargetEncoder` considers missing values, such as `np.nan` or `None`, + as another category and encodes them like any other category. 
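Editorial note (a sketch, not part of the upstream docstring): for a float `smooth`, the shrinkage described above is a count-weighted blend of the per-category target mean and the global target mean. The counts and means below are purely illustrative:

    import numpy as np

    counts = np.array([20, 30, 38])            # samples observed per category
    cat_mean = np.array([82.6, 20.3, 49.0])    # target mean within each category
    global_mean, smooth = 44.9, 5.0
    # lambda_i = counts / (counts + smooth);
    # encoding_i = lambda_i * cat_mean_i + (1 - lambda_i) * global_mean
    encoding = (counts * cat_mean + smooth * global_mean) / (counts + smooth)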
Categories + that are not seen during :meth:`fit` are encoded with the target mean, i.e. + `target_mean_`. + + For a demo on the importance of the `TargetEncoder` internal cross-fitting, + see + :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder_cross_val.py`. + For a comparison of different encoders, refer to + :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder.py`. Read + more in the :ref:`User Guide `. + + .. note:: + `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a + :term:`cross fitting` scheme is used in `fit_transform` for encoding. + See the :ref:`User Guide ` for details. + + .. versionadded:: 1.3 + + Parameters + ---------- + categories : "auto" or list of shape (n_features,) of array-like, default="auto" + Categories (unique values) per feature: + + - `"auto"` : Determine categories automatically from the training data. + - list : `categories[i]` holds the categories expected in the i-th column. The + passed categories should not mix strings and numeric values within a single + feature, and should be sorted in case of numeric values. + + The used categories are stored in the `categories_` fitted attribute. + + target_type : {"auto", "continuous", "binary", "multiclass"}, default="auto" + Type of target. + + - `"auto"` : Type of target is inferred with + :func:`~sklearn.utils.multiclass.type_of_target`. + - `"continuous"` : Continuous target + - `"binary"` : Binary target + - `"multiclass"` : Multiclass target + + .. note:: + The type of target inferred with `"auto"` may not be the desired target + type used for modeling. For example, if the target consisted of integers + between 0 and 100, then :func:`~sklearn.utils.multiclass.type_of_target` + will infer the target as `"multiclass"`. In this case, setting + `target_type="continuous"` will specify the target as a regression + problem. The `target_type_` attribute gives the target type used by the + encoder. + + .. versionchanged:: 1.4 + Added the option 'multiclass'. + + smooth : "auto" or float, default="auto" + The amount of mixing of the target mean conditioned on the value of the + category with the global target mean. A larger `smooth` value will put + more weight on the global target mean. + If `"auto"`, then `smooth` is set to an empirical Bayes estimate. + + cv : int, default=5 + Determines the number of folds in the :term:`cross fitting` strategy used in + :meth:`fit_transform`. For classification targets, `StratifiedKFold` is used + and for continuous targets, `KFold` is used. + + shuffle : bool, default=True + Whether to shuffle the data in :meth:`fit_transform` before splitting into + folds. Note that the samples within each split will not be shuffled. + + random_state : int, RandomState instance or None, default=None + When `shuffle` is True, `random_state` affects the ordering of the + indices, which controls the randomness of each fold. Otherwise, this + parameter has no effect. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + encodings_ : list of shape (n_features,) or (n_features * n_classes) of \ + ndarray + Encodings learnt on all of `X`. + For feature `i`, `encodings_[i]` are the encodings matching the + categories listed in `categories_[i]`. When `target_type_` is + "multiclass", the encoding for feature `i` and class `j` is stored in + `encodings_[j + (i * len(classes_))]`. 
E.g., for 2 features (f) and + 3 classes (c), encodings are ordered: + f0_c0, f0_c1, f0_c2, f1_c0, f1_c1, f1_c2, + + categories_ : list of shape (n_features,) of ndarray + The categories of each input feature determined during fitting or + specified in `categories` + (in order of the features in `X` and corresponding with the output + of :meth:`transform`). + + target_type_ : str + Type of target. + + target_mean_ : float + The overall mean of the target. This value is only used in :meth:`transform` + to encode categories. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + classes_ : ndarray or None + If `target_type_` is 'binary' or 'multiclass', holds the label for each class, + otherwise `None`. + + See Also + -------- + OrdinalEncoder : Performs an ordinal (integer) encoding of the categorical features. + Contrary to TargetEncoder, this encoding is not supervised. Treating the + resulting encoding as a numerical features therefore lead arbitrarily + ordered values and therefore typically lead to lower predictive performance + when used as preprocessing for a classifier or regressor. + OneHotEncoder : Performs a one-hot encoding of categorical features. This + unsupervised encoding is better suited for low cardinality categorical + variables as it generate one new feature per unique category. + + References + ---------- + .. [MIC] :doi:`Micci-Barreca, Daniele. "A preprocessing scheme for high-cardinality + categorical attributes in classification and prediction problems" + SIGKDD Explor. Newsl. 3, 1 (July 2001), 27–32. <10.1145/507533.507538>` + + Examples + -------- + With `smooth="auto"`, the smoothing parameter is set to an empirical Bayes estimate: + + >>> import numpy as np + >>> from sklearn.preprocessing import TargetEncoder + >>> X = np.array([["dog"] * 20 + ["cat"] * 30 + ["snake"] * 38], dtype=object).T + >>> y = [90.3] * 5 + [80.1] * 15 + [20.4] * 5 + [20.1] * 25 + [21.2] * 8 + [49] * 30 + >>> enc_auto = TargetEncoder(smooth="auto") + >>> X_trans = enc_auto.fit_transform(X, y) + + >>> # A high `smooth` parameter puts more weight on global mean on the categorical + >>> # encodings: + >>> enc_high_smooth = TargetEncoder(smooth=5000.0).fit(X, y) + >>> enc_high_smooth.target_mean_ + 44... + >>> enc_high_smooth.encodings_ + [array([44..., 44..., 44...])] + + >>> # On the other hand, a low `smooth` parameter puts more weight on target + >>> # conditioned on the value of the categorical: + >>> enc_low_smooth = TargetEncoder(smooth=1.0).fit(X, y) + >>> enc_low_smooth.encodings_ + [array([20..., 80..., 43...])] + """ + + _parameter_constraints: dict = { + "categories": [StrOptions({"auto"}), list], + "target_type": [StrOptions({"auto", "continuous", "binary", "multiclass"})], + "smooth": [StrOptions({"auto"}), Interval(Real, 0, None, closed="left")], + "cv": [Interval(Integral, 2, None, closed="left")], + "shuffle": ["boolean"], + "random_state": ["random_state"], + } + + def __init__( + self, + categories="auto", + target_type="auto", + smooth="auto", + cv=5, + shuffle=True, + random_state=None, + ): + self.categories = categories + self.smooth = smooth + self.target_type = target_type + self.cv = cv + self.shuffle = shuffle + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit the :class:`TargetEncoder` to X and y. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to determine the categories of each feature. + + y : array-like of shape (n_samples,) + The target data used to encode the categories. + + Returns + ------- + self : object + Fitted encoder. + """ + self._fit_encodings_all(X, y) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y): + """Fit :class:`TargetEncoder` and transform X with the target encoding. + + .. note:: + `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a + :term:`cross fitting` scheme is used in `fit_transform` for encoding. + See the :ref:`User Guide `. for details. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to determine the categories of each feature. + + y : array-like of shape (n_samples,) + The target data used to encode the categories. + + Returns + ------- + X_trans : ndarray of shape (n_samples, n_features) or \ + (n_samples, (n_features * n_classes)) + Transformed input. + """ + from ..model_selection import KFold, StratifiedKFold # avoid circular import + + X_ordinal, X_known_mask, y_encoded, n_categories = self._fit_encodings_all(X, y) + + # The cv splitter is voluntarily restricted to *KFold to enforce non + # overlapping validation folds, otherwise the fit_transform output will + # not be well-specified. + if self.target_type_ == "continuous": + cv = KFold(self.cv, shuffle=self.shuffle, random_state=self.random_state) + else: + cv = StratifiedKFold( + self.cv, shuffle=self.shuffle, random_state=self.random_state + ) + + # If 'multiclass' multiply axis=1 by num classes else keep shape the same + if self.target_type_ == "multiclass": + X_out = np.empty( + (X_ordinal.shape[0], X_ordinal.shape[1] * len(self.classes_)), + dtype=np.float64, + ) + else: + X_out = np.empty_like(X_ordinal, dtype=np.float64) + + for train_idx, test_idx in cv.split(X, y): + X_train, y_train = X_ordinal[train_idx, :], y_encoded[train_idx] + y_train_mean = np.mean(y_train, axis=0) + + if self.target_type_ == "multiclass": + encodings = self._fit_encoding_multiclass( + X_train, + y_train, + n_categories, + y_train_mean, + ) + else: + encodings = self._fit_encoding_binary_or_continuous( + X_train, + y_train, + n_categories, + y_train_mean, + ) + self._transform_X_ordinal( + X_out, + X_ordinal, + ~X_known_mask, + test_idx, + encodings, + y_train_mean, + ) + return X_out + + def transform(self, X): + """Transform X with the target encoding. + + .. note:: + `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a + :term:`cross fitting` scheme is used in `fit_transform` for encoding. + See the :ref:`User Guide `. for details. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to determine the categories of each feature. + + Returns + ------- + X_trans : ndarray of shape (n_samples, n_features) or \ + (n_samples, (n_features * n_classes)) + Transformed input. 
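Editorial sketch (not part of the upstream docstring) illustrating the note above, namely that `fit_transform` uses cross fitting while `fit(X, y).transform(X)` reuses encodings learnt on all of `X`; the data below is illustrative only:

    import numpy as np
    from sklearn.preprocessing import TargetEncoder

    X = np.array([["a"] * 30 + ["b"] * 30], dtype=object).T
    y = np.array([0] * 30 + [1] * 30)
    enc = TargetEncoder(random_state=0)
    X_cross_fitted = enc.fit_transform(X, y)   # per-fold encodings (limits target leakage)
    X_full_fit = enc.fit(X, y).transform(X)    # encodings learnt on all of X
    # The two results are generally not identical because of the cross fitting.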
+ """ + X_ordinal, X_known_mask = self._transform( + X, handle_unknown="ignore", force_all_finite="allow-nan" + ) + + # If 'multiclass' multiply axis=1 by num of classes else keep shape the same + if self.target_type_ == "multiclass": + X_out = np.empty( + (X_ordinal.shape[0], X_ordinal.shape[1] * len(self.classes_)), + dtype=np.float64, + ) + else: + X_out = np.empty_like(X_ordinal, dtype=np.float64) + + self._transform_X_ordinal( + X_out, + X_ordinal, + ~X_known_mask, + slice(None), + self.encodings_, + self.target_mean_, + ) + return X_out + + def _fit_encodings_all(self, X, y): + """Fit a target encoding with all the data.""" + # avoid circular import + from ..preprocessing import ( + LabelBinarizer, + LabelEncoder, + ) + + check_consistent_length(X, y) + self._fit(X, handle_unknown="ignore", force_all_finite="allow-nan") + + if self.target_type == "auto": + accepted_target_types = ("binary", "multiclass", "continuous") + inferred_type_of_target = type_of_target(y, input_name="y") + if inferred_type_of_target not in accepted_target_types: + raise ValueError( + "Unknown label type: Target type was inferred to be " + f"{inferred_type_of_target!r}. Only {accepted_target_types} are " + "supported." + ) + self.target_type_ = inferred_type_of_target + else: + self.target_type_ = self.target_type + + self.classes_ = None + if self.target_type_ == "binary": + label_encoder = LabelEncoder() + y = label_encoder.fit_transform(y) + self.classes_ = label_encoder.classes_ + elif self.target_type_ == "multiclass": + label_binarizer = LabelBinarizer() + y = label_binarizer.fit_transform(y) + self.classes_ = label_binarizer.classes_ + else: # continuous + y = _check_y(y, y_numeric=True, estimator=self) + + self.target_mean_ = np.mean(y, axis=0) + + X_ordinal, X_known_mask = self._transform( + X, handle_unknown="ignore", force_all_finite="allow-nan" + ) + n_categories = np.fromiter( + (len(category_for_feature) for category_for_feature in self.categories_), + dtype=np.int64, + count=len(self.categories_), + ) + if self.target_type_ == "multiclass": + encodings = self._fit_encoding_multiclass( + X_ordinal, + y, + n_categories, + self.target_mean_, + ) + else: + encodings = self._fit_encoding_binary_or_continuous( + X_ordinal, + y, + n_categories, + self.target_mean_, + ) + self.encodings_ = encodings + + return X_ordinal, X_known_mask, y, n_categories + + def _fit_encoding_binary_or_continuous( + self, X_ordinal, y, n_categories, target_mean + ): + """Learn target encodings.""" + if self.smooth == "auto": + y_variance = np.var(y) + encodings = _fit_encoding_fast_auto_smooth( + X_ordinal, + y, + n_categories, + target_mean, + y_variance, + ) + else: + encodings = _fit_encoding_fast( + X_ordinal, + y, + n_categories, + self.smooth, + target_mean, + ) + return encodings + + def _fit_encoding_multiclass(self, X_ordinal, y, n_categories, target_mean): + """Learn multiclass encodings. + + Learn encodings for each class (c) then reorder encodings such that + the same features (f) are grouped together. 
`reorder_index` enables + converting from: + f0_c0, f1_c0, f0_c1, f1_c1, f0_c2, f1_c2 + to: + f0_c0, f0_c1, f0_c2, f1_c0, f1_c1, f1_c2 + """ + n_features = self.n_features_in_ + n_classes = len(self.classes_) + + encodings = [] + for i in range(n_classes): + y_class = y[:, i] + encoding = self._fit_encoding_binary_or_continuous( + X_ordinal, + y_class, + n_categories, + target_mean[i], + ) + encodings.extend(encoding) + + reorder_index = ( + idx + for start in range(n_features) + for idx in range(start, (n_classes * n_features), n_features) + ) + return [encodings[idx] for idx in reorder_index] + + def _transform_X_ordinal( + self, + X_out, + X_ordinal, + X_unknown_mask, + row_indices, + encodings, + target_mean, + ): + """Transform X_ordinal using encodings. + + In the multiclass case, `X_ordinal` and `X_unknown_mask` have column + (axis=1) size `n_features`, while `encodings` has length of size + `n_features * n_classes`. `feat_idx` deals with this by repeating + feature indices by `n_classes` E.g., for 3 features, 2 classes: + 0,0,1,1,2,2 + + Additionally, `target_mean` is of shape (`n_classes`,) so `mean_idx` + cycles through 0 to `n_classes` - 1, `n_features` times. + """ + if self.target_type_ == "multiclass": + n_classes = len(self.classes_) + for e_idx, encoding in enumerate(encodings): + # Repeat feature indices by n_classes + feat_idx = e_idx // n_classes + # Cycle through each class + mean_idx = e_idx % n_classes + X_out[row_indices, e_idx] = encoding[X_ordinal[row_indices, feat_idx]] + X_out[X_unknown_mask[:, feat_idx], e_idx] = target_mean[mean_idx] + else: + for e_idx, encoding in enumerate(encodings): + X_out[row_indices, e_idx] = encoding[X_ordinal[row_indices, e_idx]] + X_out[X_unknown_mask[:, e_idx], e_idx] = target_mean + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Not used, present here for API consistency by convention. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. `feature_names_in_` is used unless it is + not defined, in which case the following input feature names are + generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + When `type_of_target_` is "multiclass" the names are of the format + '_'. 
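Editorial sketch (hypothetical names, not part of the upstream docstring): the multiclass output is feature-major, matching the `reorder_index` logic above, e.g. for two features and three classes:

    n_features, n_classes = 2, 3
    class_major = ["f0_c0", "f1_c0", "f0_c1", "f1_c1", "f0_c2", "f1_c2"]
    reorder_index = (
        idx
        for start in range(n_features)
        for idx in range(start, n_classes * n_features, n_features)
    )
    [class_major[idx] for idx in reorder_index]
    # -> ['f0_c0', 'f0_c1', 'f0_c2', 'f1_c0', 'f1_c1', 'f1_c2']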
+ """ + check_is_fitted(self, "n_features_in_") + feature_names = _check_feature_names_in(self, input_features) + if self.target_type_ == "multiclass": + feature_names = [ + f"{feature_name}_{class_name}" + for feature_name in feature_names + for class_name in self.classes_ + ] + return np.asarray(feature_names, dtype=object) + else: + return feature_names + + def _more_tags(self): + return { + "requires_y": True, + } diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_target_encoder_fast.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_target_encoder_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..13734c9ef32d972fbf5777bede89232bf5a5d141 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/_target_encoder_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..deaf8ecd1cccb871eb4661484c7220de88673dfa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_polynomial.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_polynomial.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51804fa161c9d8a225e073e4643d9cf1937fd323 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_polynomial.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_common.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..09f702f64ce2367ef6fe47fdb789e0475bf11def --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_common.py @@ -0,0 +1,187 @@ +import warnings + +import numpy as np +import pytest + +from sklearn.base import clone +from sklearn.datasets import load_iris +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import ( + MaxAbsScaler, + MinMaxScaler, + PowerTransformer, + QuantileTransformer, + RobustScaler, + StandardScaler, + maxabs_scale, + minmax_scale, + power_transform, + quantile_transform, + robust_scale, + scale, +) +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.fixes import ( + BSR_CONTAINERS, + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + DIA_CONTAINERS, + DOK_CONTAINERS, + LIL_CONTAINERS, +) + +iris = load_iris() + + +def _get_valid_samples_by_column(X, col): + """Get non NaN samples in column of X""" + return X[:, [col]][~np.isnan(X[:, col])] + + +@pytest.mark.parametrize( + "est, func, support_sparse, strictly_positive, omit_kwargs", + [ + (MaxAbsScaler(), 
maxabs_scale, True, False, []), + (MinMaxScaler(), minmax_scale, False, False, ["clip"]), + (StandardScaler(), scale, False, False, []), + (StandardScaler(with_mean=False), scale, True, False, []), + (PowerTransformer("yeo-johnson"), power_transform, False, False, []), + (PowerTransformer("box-cox"), power_transform, False, True, []), + (QuantileTransformer(n_quantiles=10), quantile_transform, True, False, []), + (RobustScaler(), robust_scale, False, False, []), + (RobustScaler(with_centering=False), robust_scale, True, False, []), + ], +) +def test_missing_value_handling( + est, func, support_sparse, strictly_positive, omit_kwargs +): + # check that the preprocessing method let pass nan + rng = np.random.RandomState(42) + X = iris.data.copy() + n_missing = 50 + X[ + rng.randint(X.shape[0], size=n_missing), rng.randint(X.shape[1], size=n_missing) + ] = np.nan + if strictly_positive: + X += np.nanmin(X) + 0.1 + X_train, X_test = train_test_split(X, random_state=1) + # sanity check + assert not np.all(np.isnan(X_train), axis=0).any() + assert np.any(np.isnan(X_train), axis=0).all() + assert np.any(np.isnan(X_test), axis=0).all() + X_test[:, 0] = np.nan # make sure this boundary case is tested + + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + Xt = est.fit(X_train).transform(X_test) + # ensure no warnings are raised + # missing values should still be missing, and only them + assert_array_equal(np.isnan(Xt), np.isnan(X_test)) + + # check that the function leads to the same results as the class + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + Xt_class = est.transform(X_train) + kwargs = est.get_params() + # remove the parameters which should be omitted because they + # are not defined in the counterpart function of the preprocessing class + for kwarg in omit_kwargs: + _ = kwargs.pop(kwarg) + Xt_func = func(X_train, **kwargs) + assert_array_equal(np.isnan(Xt_func), np.isnan(Xt_class)) + assert_allclose(Xt_func[~np.isnan(Xt_func)], Xt_class[~np.isnan(Xt_class)]) + + # check that the inverse transform keep NaN + Xt_inv = est.inverse_transform(Xt) + assert_array_equal(np.isnan(Xt_inv), np.isnan(X_test)) + # FIXME: we can introduce equal_nan=True in recent version of numpy. + # For the moment which just check that non-NaN values are almost equal. 
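# (Editorial aside, not part of the upstream hunk: with NumPy's `equal_nan`
#  support, the masked comparison below could in principle be written as
#  `assert np.allclose(Xt_inv, X_test, equal_nan=True)`.)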
+ assert_allclose(Xt_inv[~np.isnan(Xt_inv)], X_test[~np.isnan(X_test)]) + + for i in range(X.shape[1]): + # train only on non-NaN + est.fit(_get_valid_samples_by_column(X_train, i)) + # check transforming with NaN works even when training without NaN + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + Xt_col = est.transform(X_test[:, [i]]) + assert_allclose(Xt_col, Xt[:, [i]]) + # check non-NaN is handled as before - the 1st column is all nan + if not np.isnan(X_test[:, i]).all(): + Xt_col_nonan = est.transform(_get_valid_samples_by_column(X_test, i)) + assert_array_equal(Xt_col_nonan, Xt_col[~np.isnan(Xt_col.squeeze())]) + + if support_sparse: + est_dense = clone(est) + est_sparse = clone(est) + + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + Xt_dense = est_dense.fit(X_train).transform(X_test) + Xt_inv_dense = est_dense.inverse_transform(Xt_dense) + + for sparse_container in ( + BSR_CONTAINERS + + COO_CONTAINERS + + CSC_CONTAINERS + + CSR_CONTAINERS + + DIA_CONTAINERS + + DOK_CONTAINERS + + LIL_CONTAINERS + ): + # check that the dense and sparse inputs lead to the same results + # precompute the matrix to avoid catching side warnings + X_train_sp = sparse_container(X_train) + X_test_sp = sparse_container(X_test) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", PendingDeprecationWarning) + warnings.simplefilter("error", RuntimeWarning) + Xt_sp = est_sparse.fit(X_train_sp).transform(X_test_sp) + + assert_allclose(Xt_sp.toarray(), Xt_dense) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", PendingDeprecationWarning) + warnings.simplefilter("error", RuntimeWarning) + Xt_inv_sp = est_sparse.inverse_transform(Xt_sp) + + assert_allclose(Xt_inv_sp.toarray(), Xt_inv_dense) + + +@pytest.mark.parametrize( + "est, func", + [ + (MaxAbsScaler(), maxabs_scale), + (MinMaxScaler(), minmax_scale), + (StandardScaler(), scale), + (StandardScaler(with_mean=False), scale), + (PowerTransformer("yeo-johnson"), power_transform), + ( + PowerTransformer("box-cox"), + power_transform, + ), + (QuantileTransformer(n_quantiles=3), quantile_transform), + (RobustScaler(), robust_scale), + (RobustScaler(with_centering=False), robust_scale), + ], +) +def test_missing_value_pandas_na_support(est, func): + # Test pandas IntegerArray with pd.NA + pd = pytest.importorskip("pandas") + + X = np.array( + [ + [1, 2, 3, np.nan, np.nan, 4, 5, 1], + [np.nan, np.nan, 8, 4, 6, np.nan, np.nan, 8], + [1, 2, 3, 4, 5, 6, 7, 8], + ] + ).T + + # Creates dataframe with IntegerArrays with pd.NA + X_df = pd.DataFrame(X, dtype="Int16", columns=["a", "b", "c"]) + X_df["c"] = X_df["c"].astype("int") + + X_trans = est.fit_transform(X) + X_df_trans = est.fit_transform(X_df) + + assert_allclose(X_trans, X_df_trans) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_data.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_data.py new file mode 100644 index 0000000000000000000000000000000000000000..24d8ab2a36c3ac5d98e8b0ac373cc185b48eb810 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_data.py @@ -0,0 +1,2593 @@ +# Authors: +# +# Giorgio Patrini +# +# License: BSD 3 clause + +import re +import warnings + +import numpy as np +import numpy.linalg as la +import pytest +from scipy import sparse, stats + +from sklearn import datasets +from sklearn.base import clone +from sklearn.exceptions import NotFittedError +from sklearn.metrics.pairwise 
import linear_kernel +from sklearn.model_selection import cross_val_predict +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import ( + Binarizer, + KernelCenterer, + MaxAbsScaler, + MinMaxScaler, + Normalizer, + PowerTransformer, + QuantileTransformer, + RobustScaler, + StandardScaler, + add_dummy_feature, + maxabs_scale, + minmax_scale, + normalize, + power_transform, + quantile_transform, + robust_scale, + scale, +) +from sklearn.preprocessing._data import BOUNDS_THRESHOLD, _handle_zeros_in_scale +from sklearn.svm import SVR +from sklearn.utils import gen_batches, shuffle +from sklearn.utils._array_api import ( + yield_namespace_device_dtype_combinations, +) +from sklearn.utils._testing import ( + _convert_container, + assert_allclose, + assert_allclose_dense_sparse, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_array_less, + skip_if_32bit, +) +from sklearn.utils.estimator_checks import ( + _get_check_estimator_ids, + check_array_api_input_and_values, +) +from sklearn.utils.fixes import ( + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + LIL_CONTAINERS, +) +from sklearn.utils.sparsefuncs import mean_variance_axis + +iris = datasets.load_iris() + +# Make some data to be used many times +rng = np.random.RandomState(0) +n_features = 30 +n_samples = 1000 +offsets = rng.uniform(-1, 1, size=n_features) +scales = rng.uniform(1, 10, size=n_features) +X_2d = rng.randn(n_samples, n_features) * scales + offsets +X_1row = X_2d[0, :].reshape(1, n_features) +X_1col = X_2d[:, 0].reshape(n_samples, 1) +X_list_1row = X_1row.tolist() +X_list_1col = X_1col.tolist() + + +def toarray(a): + if hasattr(a, "toarray"): + a = a.toarray() + return a + + +def _check_dim_1axis(a): + return np.asarray(a).shape[0] + + +def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size, n_samples_seen): + if batch_stop != n: + assert (i + 1) * chunk_size == n_samples_seen + else: + assert i * chunk_size + (batch_stop - batch_start) == n_samples_seen + + +def test_raises_value_error_if_sample_weights_greater_than_1d(): + # Sample weights must be either scalar or 1D + + n_sampless = [2, 3] + n_featuress = [3, 2] + + for n_samples, n_features in zip(n_sampless, n_featuress): + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples) + + scaler = StandardScaler() + + # make sure Error is raised the sample weights greater than 1d + sample_weight_notOK = rng.randn(n_samples, 1) ** 2 + with pytest.raises(ValueError): + scaler.fit(X, y, sample_weight=sample_weight_notOK) + + +@pytest.mark.parametrize( + ["Xw", "X", "sample_weight"], + [ + ([[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [1, 2, 3], [4, 5, 6]], [2.0, 1.0]), + ( + [[1, 0, 1], [0, 0, 1]], + [[1, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]], + np.array([1, 3]), + ), + ( + [[1, np.nan, 1], [np.nan, np.nan, 1]], + [ + [1, np.nan, 1], + [np.nan, np.nan, 1], + [np.nan, np.nan, 1], + [np.nan, np.nan, 1], + ], + np.array([1, 3]), + ), + ], +) +@pytest.mark.parametrize("array_constructor", ["array", "sparse_csr", "sparse_csc"]) +def test_standard_scaler_sample_weight(Xw, X, sample_weight, array_constructor): + with_mean = not array_constructor.startswith("sparse") + X = _convert_container(X, array_constructor) + Xw = _convert_container(Xw, array_constructor) + + # weighted StandardScaler + yw = np.ones(Xw.shape[0]) + scaler_w = StandardScaler(with_mean=with_mean) + scaler_w.fit(Xw, yw, sample_weight=sample_weight) + + # unweighted, but with repeated samples + y = np.ones(X.shape[0]) + scaler = 
StandardScaler(with_mean=with_mean) + scaler.fit(X, y) + + X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]] + + assert_almost_equal(scaler.mean_, scaler_w.mean_) + assert_almost_equal(scaler.var_, scaler_w.var_) + assert_almost_equal(scaler.transform(X_test), scaler_w.transform(X_test)) + + +def test_standard_scaler_1d(): + # Test scaling of dataset along single axis + for X in [X_1row, X_1col, X_list_1row, X_list_1row]: + scaler = StandardScaler() + X_scaled = scaler.fit(X).transform(X, copy=True) + + if isinstance(X, list): + X = np.array(X) # cast only after scaling done + + if _check_dim_1axis(X) == 1: + assert_almost_equal(scaler.mean_, X.ravel()) + assert_almost_equal(scaler.scale_, np.ones(n_features)) + assert_array_almost_equal(X_scaled.mean(axis=0), np.zeros_like(n_features)) + assert_array_almost_equal(X_scaled.std(axis=0), np.zeros_like(n_features)) + else: + assert_almost_equal(scaler.mean_, X.mean()) + assert_almost_equal(scaler.scale_, X.std()) + assert_array_almost_equal(X_scaled.mean(axis=0), np.zeros_like(n_features)) + assert_array_almost_equal(X_scaled.mean(axis=0), 0.0) + assert_array_almost_equal(X_scaled.std(axis=0), 1.0) + assert scaler.n_samples_seen_ == X.shape[0] + + # check inverse transform + X_scaled_back = scaler.inverse_transform(X_scaled) + assert_array_almost_equal(X_scaled_back, X) + + # Constant feature + X = np.ones((5, 1)) + scaler = StandardScaler() + X_scaled = scaler.fit(X).transform(X, copy=True) + assert_almost_equal(scaler.mean_, 1.0) + assert_almost_equal(scaler.scale_, 1.0) + assert_array_almost_equal(X_scaled.mean(axis=0), 0.0) + assert_array_almost_equal(X_scaled.std(axis=0), 0.0) + assert scaler.n_samples_seen_ == X.shape[0] + + +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS) +@pytest.mark.parametrize("add_sample_weight", [False, True]) +def test_standard_scaler_dtype(add_sample_weight, sparse_container): + # Ensure scaling does not affect dtype + rng = np.random.RandomState(0) + n_samples = 10 + n_features = 3 + if add_sample_weight: + sample_weight = np.ones(n_samples) + else: + sample_weight = None + with_mean = True + if sparse_container is not None: + # scipy sparse containers do not support float16, see + # https://github.com/scipy/scipy/issues/7408 for more details. 
+ supported_dtype = [np.float64, np.float32] + else: + supported_dtype = [np.float64, np.float32, np.float16] + for dtype in supported_dtype: + X = rng.randn(n_samples, n_features).astype(dtype) + if sparse_container is not None: + X = sparse_container(X) + with_mean = False + + scaler = StandardScaler(with_mean=with_mean) + X_scaled = scaler.fit(X, sample_weight=sample_weight).transform(X) + assert X.dtype == X_scaled.dtype + assert scaler.mean_.dtype == np.float64 + assert scaler.scale_.dtype == np.float64 + + +@pytest.mark.parametrize( + "scaler", + [ + StandardScaler(with_mean=False), + RobustScaler(with_centering=False), + ], +) +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS) +@pytest.mark.parametrize("add_sample_weight", [False, True]) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +@pytest.mark.parametrize("constant", [0, 1.0, 100.0]) +def test_standard_scaler_constant_features( + scaler, add_sample_weight, sparse_container, dtype, constant +): + if isinstance(scaler, RobustScaler) and add_sample_weight: + pytest.skip(f"{scaler.__class__.__name__} does not yet support sample_weight") + + rng = np.random.RandomState(0) + n_samples = 100 + n_features = 1 + if add_sample_weight: + fit_params = dict(sample_weight=rng.uniform(size=n_samples) * 2) + else: + fit_params = {} + X_array = np.full(shape=(n_samples, n_features), fill_value=constant, dtype=dtype) + X = X_array if sparse_container is None else sparse_container(X_array) + X_scaled = scaler.fit(X, **fit_params).transform(X) + + if isinstance(scaler, StandardScaler): + # The variance info should be close to zero for constant features. + assert_allclose(scaler.var_, np.zeros(X.shape[1]), atol=1e-7) + + # Constant features should not be scaled (scale of 1.): + assert_allclose(scaler.scale_, np.ones(X.shape[1])) + + assert X_scaled is not X # make sure we make a copy + assert_allclose_dense_sparse(X_scaled, X) + + if isinstance(scaler, StandardScaler) and not add_sample_weight: + # Also check consistency with the standard scale function. + X_scaled_2 = scale(X, with_mean=scaler.with_mean) + assert X_scaled_2 is not X # make sure we did a copy + assert_allclose_dense_sparse(X_scaled_2, X) + + +@pytest.mark.parametrize("n_samples", [10, 100, 10_000]) +@pytest.mark.parametrize("average", [1e-10, 1, 1e10]) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS) +def test_standard_scaler_near_constant_features( + n_samples, sparse_container, average, dtype +): + # Check that when the variance is too small (var << mean**2) the feature + # is considered constant and not scaled. + + scale_min, scale_max = -30, 19 + scales = np.array([10**i for i in range(scale_min, scale_max + 1)], dtype=dtype) + + n_features = scales.shape[0] + X = np.empty((n_samples, n_features), dtype=dtype) + # Make a dataset of known var = scales**2 and mean = average + X[: n_samples // 2, :] = average + scales + X[n_samples // 2 :, :] = average - scales + X_array = X if sparse_container is None else sparse_container(X) + + scaler = StandardScaler(with_mean=False).fit(X_array) + + # StandardScaler uses float64 accumulators even if the data has a float32 + # dtype. + eps = np.finfo(np.float64).eps + + # if var < bound = N.eps.var + N².eps².mean², the feature is considered + # constant and the scale_ attribute is set to 1. 
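# (Editorial aside: with var = scales**2 and mean = average, the next line
#  evaluates exactly this bound, i.e.
#  bound = n_samples * eps * scales**2 + n_samples**2 * eps**2 * average**2.)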
+ bounds = n_samples * eps * scales**2 + n_samples**2 * eps**2 * average**2 + within_bounds = scales**2 <= bounds + + # Check that scale_min is small enough to have some scales below the + # bound and therefore detected as constant: + assert np.any(within_bounds) + + # Check that such features are actually treated as constant by the scaler: + assert all(scaler.var_[within_bounds] <= bounds[within_bounds]) + assert_allclose(scaler.scale_[within_bounds], 1.0) + + # Depending the on the dtype of X, some features might not actually be + # representable as non constant for small scales (even if above the + # precision bound of the float64 variance estimate). Such feature should + # be correctly detected as constants with 0 variance by StandardScaler. + representable_diff = X[0, :] - X[-1, :] != 0 + assert_allclose(scaler.var_[np.logical_not(representable_diff)], 0) + assert_allclose(scaler.scale_[np.logical_not(representable_diff)], 1) + + # The other features are scaled and scale_ is equal to sqrt(var_) assuming + # that scales are large enough for average + scale and average - scale to + # be distinct in X (depending on X's dtype). + common_mask = np.logical_and(scales**2 > bounds, representable_diff) + assert_allclose(scaler.scale_[common_mask], np.sqrt(scaler.var_)[common_mask]) + + +def test_scale_1d(): + # 1-d inputs + X_list = [1.0, 3.0, 5.0, 0.0] + X_arr = np.array(X_list) + + for X in [X_list, X_arr]: + X_scaled = scale(X) + assert_array_almost_equal(X_scaled.mean(), 0.0) + assert_array_almost_equal(X_scaled.std(), 1.0) + assert_array_equal(scale(X, with_mean=False, with_std=False), X) + + +@skip_if_32bit +def test_standard_scaler_numerical_stability(): + # Test numerical stability of scaling + # np.log(1e-5) is taken because of its floating point representation + # was empirically found to cause numerical problems with np.mean & np.std. 
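# (Editorial aside: np.log(1e-5) is roughly -11.5129; an array of identical
#  values has a true standard deviation of 0, so the computed std appears to be
#  dominated by floating point round-off, which is what these checks exercise.)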
+ x = np.full(8, np.log(1e-5), dtype=np.float64) + # This does not raise a warning as the number of samples is too low + # to trigger the problem in recent numpy + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + scale(x) + assert_array_almost_equal(scale(x), np.zeros(8)) + + # with 2 more samples, the std computation run into numerical issues: + x = np.full(10, np.log(1e-5), dtype=np.float64) + warning_message = "standard deviation of the data is probably very close to 0" + with pytest.warns(UserWarning, match=warning_message): + x_scaled = scale(x) + assert_array_almost_equal(x_scaled, np.zeros(10)) + + x = np.full(10, 1e-100, dtype=np.float64) + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + x_small_scaled = scale(x) + assert_array_almost_equal(x_small_scaled, np.zeros(10)) + + # Large values can cause (often recoverable) numerical stability issues: + x_big = np.full(10, 1e100, dtype=np.float64) + warning_message = "Dataset may contain too large values" + with pytest.warns(UserWarning, match=warning_message): + x_big_scaled = scale(x_big) + assert_array_almost_equal(x_big_scaled, np.zeros(10)) + assert_array_almost_equal(x_big_scaled, x_small_scaled) + with pytest.warns(UserWarning, match=warning_message): + x_big_centered = scale(x_big, with_std=False) + assert_array_almost_equal(x_big_centered, np.zeros(10)) + assert_array_almost_equal(x_big_centered, x_small_scaled) + + +def test_scaler_2d_arrays(): + # Test scaling of 2d array along first axis + rng = np.random.RandomState(0) + n_features = 5 + n_samples = 4 + X = rng.randn(n_samples, n_features) + X[:, 0] = 0.0 # first feature is always of zero + + scaler = StandardScaler() + X_scaled = scaler.fit(X).transform(X, copy=True) + assert not np.any(np.isnan(X_scaled)) + assert scaler.n_samples_seen_ == n_samples + + assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) + assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0]) + # Check that X has been copied + assert X_scaled is not X + + # check inverse transform + X_scaled_back = scaler.inverse_transform(X_scaled) + assert X_scaled_back is not X + assert X_scaled_back is not X_scaled + assert_array_almost_equal(X_scaled_back, X) + + X_scaled = scale(X, axis=1, with_std=False) + assert not np.any(np.isnan(X_scaled)) + assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0]) + X_scaled = scale(X, axis=1, with_std=True) + assert not np.any(np.isnan(X_scaled)) + assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0]) + assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0]) + # Check that the data hasn't been modified + assert X_scaled is not X + + X_scaled = scaler.fit(X).transform(X, copy=False) + assert not np.any(np.isnan(X_scaled)) + assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) + assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0]) + # Check that X has not been copied + assert X_scaled is X + + X = rng.randn(4, 5) + X[:, 0] = 1.0 # first feature is a constant, non zero feature + scaler = StandardScaler() + X_scaled = scaler.fit(X).transform(X, copy=True) + assert not np.any(np.isnan(X_scaled)) + assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) + assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0]) + # Check that X has not been copied + assert X_scaled is not X + + +def test_scaler_float16_overflow(): + # Test if the scaler will not overflow on float16 numpy arrays + 
rng = np.random.RandomState(0) + # float16 has a maximum of 65500.0. On the worst case 5 * 200000 is 100000 + # which is enough to overflow the data type + X = rng.uniform(5, 10, [200000, 1]).astype(np.float16) + + with np.errstate(over="raise"): + scaler = StandardScaler().fit(X) + X_scaled = scaler.transform(X) + + # Calculate the float64 equivalent to verify result + X_scaled_f64 = StandardScaler().fit_transform(X.astype(np.float64)) + + # Overflow calculations may cause -inf, inf, or nan. Since there is no nan + # input, all of the outputs should be finite. This may be redundant since a + # FloatingPointError exception will be thrown on overflow above. + assert np.all(np.isfinite(X_scaled)) + + # The normal distribution is very unlikely to go above 4. At 4.0-8.0 the + # float16 precision is 2^-8 which is around 0.004. Thus only 2 decimals are + # checked to account for precision differences. + assert_array_almost_equal(X_scaled, X_scaled_f64, decimal=2) + + +def test_handle_zeros_in_scale(): + s1 = np.array([0, 1e-16, 1, 2, 3]) + s2 = _handle_zeros_in_scale(s1, copy=True) + + assert_allclose(s1, np.array([0, 1e-16, 1, 2, 3])) + assert_allclose(s2, np.array([1, 1, 1, 2, 3])) + + +def test_minmax_scaler_partial_fit(): + # Test if partial_fit run over many batches of size 1 and 50 + # gives the same results as fit + X = X_2d + n = X.shape[0] + + for chunk_size in [1, 2, 50, n, n + 42]: + # Test mean at the end of the process + scaler_batch = MinMaxScaler().fit(X) + + scaler_incr = MinMaxScaler() + for batch in gen_batches(n_samples, chunk_size): + scaler_incr = scaler_incr.partial_fit(X[batch]) + + assert_array_almost_equal(scaler_batch.data_min_, scaler_incr.data_min_) + assert_array_almost_equal(scaler_batch.data_max_, scaler_incr.data_max_) + assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_ + assert_array_almost_equal(scaler_batch.data_range_, scaler_incr.data_range_) + assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) + assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_) + + # Test std after 1 step + batch0 = slice(0, chunk_size) + scaler_batch = MinMaxScaler().fit(X[batch0]) + scaler_incr = MinMaxScaler().partial_fit(X[batch0]) + + assert_array_almost_equal(scaler_batch.data_min_, scaler_incr.data_min_) + assert_array_almost_equal(scaler_batch.data_max_, scaler_incr.data_max_) + assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_ + assert_array_almost_equal(scaler_batch.data_range_, scaler_incr.data_range_) + assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) + assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_) + + # Test std until the end of partial fits, and + scaler_batch = MinMaxScaler().fit(X) + scaler_incr = MinMaxScaler() # Clean estimator + for i, batch in enumerate(gen_batches(n_samples, chunk_size)): + scaler_incr = scaler_incr.partial_fit(X[batch]) + assert_correct_incr( + i, + batch_start=batch.start, + batch_stop=batch.stop, + n=n, + chunk_size=chunk_size, + n_samples_seen=scaler_incr.n_samples_seen_, + ) + + +def test_standard_scaler_partial_fit(): + # Test if partial_fit run over many batches of size 1 and 50 + # gives the same results as fit + X = X_2d + n = X.shape[0] + + for chunk_size in [1, 2, 50, n, n + 42]: + # Test mean at the end of the process + scaler_batch = StandardScaler(with_std=False).fit(X) + + scaler_incr = StandardScaler(with_std=False) + for batch in gen_batches(n_samples, chunk_size): + scaler_incr = scaler_incr.partial_fit(X[batch]) + 
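# (Editorial aside: once every batch has been seen, the incremental statistics
#  should match the full-data fit above.)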
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_) + assert scaler_batch.var_ == scaler_incr.var_ # Nones + assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_ + + # Test std after 1 step + batch0 = slice(0, chunk_size) + scaler_incr = StandardScaler().partial_fit(X[batch0]) + if chunk_size == 1: + assert_array_almost_equal( + np.zeros(n_features, dtype=np.float64), scaler_incr.var_ + ) + assert_array_almost_equal( + np.ones(n_features, dtype=np.float64), scaler_incr.scale_ + ) + else: + assert_array_almost_equal(np.var(X[batch0], axis=0), scaler_incr.var_) + assert_array_almost_equal( + np.std(X[batch0], axis=0), scaler_incr.scale_ + ) # no constants + + # Test std until the end of partial fits, and + scaler_batch = StandardScaler().fit(X) + scaler_incr = StandardScaler() # Clean estimator + for i, batch in enumerate(gen_batches(n_samples, chunk_size)): + scaler_incr = scaler_incr.partial_fit(X[batch]) + assert_correct_incr( + i, + batch_start=batch.start, + batch_stop=batch.stop, + n=n, + chunk_size=chunk_size, + n_samples_seen=scaler_incr.n_samples_seen_, + ) + + assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_) + assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_ + + +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +def test_standard_scaler_partial_fit_numerical_stability(sparse_container): + # Test if the incremental computation introduces significative errors + # for large datasets with values of large magniture + rng = np.random.RandomState(0) + n_features = 2 + n_samples = 100 + offsets = rng.uniform(-1e15, 1e15, size=n_features) + scales = rng.uniform(1e3, 1e6, size=n_features) + X = rng.randn(n_samples, n_features) * scales + offsets + + scaler_batch = StandardScaler().fit(X) + scaler_incr = StandardScaler() + for chunk in X: + scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features)) + + # Regardless of abs values, they must not be more diff 6 significant digits + tol = 10 ** (-6) + assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol) + assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol) + assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol) + # NOTE Be aware that for much larger offsets std is very unstable (last + # assert) while mean is OK. 
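# (Editorial sketch, not part of the upstream hunk: a standalone illustration of
#  the dense check above, assuming moderate offsets so that both the mean and
#  the variance remain stable.)
rng_sketch = np.random.RandomState(0)
X_sketch = rng_sketch.randn(500, 2) * 1e3 + 1e6
full = StandardScaler().fit(X_sketch)
incr = StandardScaler()
for row in X_sketch:
    incr.partial_fit(row.reshape(1, -1))
assert_allclose(incr.mean_, full.mean_, rtol=1e-6)
assert_allclose(incr.var_, full.var_, rtol=1e-6)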
+ + # Sparse input + size = (100, 3) + scale = 1e20 + X = sparse_container(rng.randint(0, 2, size).astype(np.float64) * scale) + + # with_mean=False is required with sparse input + scaler = StandardScaler(with_mean=False).fit(X) + scaler_incr = StandardScaler(with_mean=False) + + for chunk in X: + scaler_incr = scaler_incr.partial_fit(chunk) + + # Regardless of magnitude, they must not differ more than of 6 digits + tol = 10 ** (-6) + assert scaler.mean_ is not None + assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol) + assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol) + + +@pytest.mark.parametrize("sample_weight", [True, None]) +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +def test_partial_fit_sparse_input(sample_weight, sparse_container): + # Check that sparsity is not destroyed + X = sparse_container(np.array([[1.0], [0.0], [0.0], [5.0]])) + + if sample_weight: + sample_weight = rng.rand(X.shape[0]) + + null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) + X_null = null_transform.partial_fit(X, sample_weight=sample_weight).transform(X) + assert_array_equal(X_null.toarray(), X.toarray()) + X_orig = null_transform.inverse_transform(X_null) + assert_array_equal(X_orig.toarray(), X_null.toarray()) + assert_array_equal(X_orig.toarray(), X.toarray()) + + +@pytest.mark.parametrize("sample_weight", [True, None]) +def test_standard_scaler_trasform_with_partial_fit(sample_weight): + # Check some postconditions after applying partial_fit and transform + X = X_2d[:100, :] + + if sample_weight: + sample_weight = rng.rand(X.shape[0]) + + scaler_incr = StandardScaler() + for i, batch in enumerate(gen_batches(X.shape[0], 1)): + X_sofar = X[: (i + 1), :] + chunks_copy = X_sofar.copy() + if sample_weight is None: + scaled_batch = StandardScaler().fit_transform(X_sofar) + scaler_incr = scaler_incr.partial_fit(X[batch]) + else: + scaled_batch = StandardScaler().fit_transform( + X_sofar, sample_weight=sample_weight[: i + 1] + ) + scaler_incr = scaler_incr.partial_fit( + X[batch], sample_weight=sample_weight[batch] + ) + scaled_incr = scaler_incr.transform(X_sofar) + + assert_array_almost_equal(scaled_batch, scaled_incr) + assert_array_almost_equal(X_sofar, chunks_copy) # No change + right_input = scaler_incr.inverse_transform(scaled_incr) + assert_array_almost_equal(X_sofar, right_input) + + zero = np.zeros(X.shape[1]) + epsilon = np.finfo(float).eps + assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal + assert_array_less(zero, scaler_incr.scale_ + epsilon) + if sample_weight is None: + # (i+1) because the Scaler has been already fitted + assert (i + 1) == scaler_incr.n_samples_seen_ + else: + assert np.sum(sample_weight[: i + 1]) == pytest.approx( + scaler_incr.n_samples_seen_ + ) + + +def test_standard_check_array_of_inverse_transform(): + # Check if StandardScaler inverse_transform is + # converting the integer array to float + x = np.array( + [ + [1, 1, 1, 0, 1, 0], + [1, 1, 1, 0, 1, 0], + [0, 8, 0, 1, 0, 0], + [1, 4, 1, 1, 0, 0], + [0, 1, 0, 0, 1, 0], + [0, 4, 0, 1, 0, 1], + ], + dtype=np.int32, + ) + + scaler = StandardScaler() + scaler.fit(x) + + # The of inverse_transform should be converted + # to a float array. + # If not X *= self.scale_ will fail. 
+ scaler.inverse_transform(x) + + +@pytest.mark.parametrize( + "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations() +) +@pytest.mark.parametrize( + "check", + [check_array_api_input_and_values], + ids=_get_check_estimator_ids, +) +@pytest.mark.parametrize( + "estimator", + [ + MaxAbsScaler(), + MinMaxScaler(), + KernelCenterer(), + Normalizer(norm="l1"), + Normalizer(norm="l2"), + Normalizer(norm="max"), + ], + ids=_get_check_estimator_ids, +) +def test_scaler_array_api_compliance( + estimator, check, array_namespace, device, dtype_name +): + name = estimator.__class__.__name__ + check(name, estimator, array_namespace, device=device, dtype_name=dtype_name) + + +def test_min_max_scaler_iris(): + X = iris.data + scaler = MinMaxScaler() + # default params + X_trans = scaler.fit_transform(X) + assert_array_almost_equal(X_trans.min(axis=0), 0) + assert_array_almost_equal(X_trans.max(axis=0), 1) + X_trans_inv = scaler.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + + # not default params: min=1, max=2 + scaler = MinMaxScaler(feature_range=(1, 2)) + X_trans = scaler.fit_transform(X) + assert_array_almost_equal(X_trans.min(axis=0), 1) + assert_array_almost_equal(X_trans.max(axis=0), 2) + X_trans_inv = scaler.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + + # min=-.5, max=.6 + scaler = MinMaxScaler(feature_range=(-0.5, 0.6)) + X_trans = scaler.fit_transform(X) + assert_array_almost_equal(X_trans.min(axis=0), -0.5) + assert_array_almost_equal(X_trans.max(axis=0), 0.6) + X_trans_inv = scaler.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + + # raises on invalid range + scaler = MinMaxScaler(feature_range=(2, 1)) + with pytest.raises(ValueError): + scaler.fit(X) + + +def test_min_max_scaler_zero_variance_features(): + # Check min max scaler on toy data with zero variance features + X = [[0.0, 1.0, +0.5], [0.0, 1.0, -0.1], [0.0, 1.0, +1.1]] + + X_new = [[+0.0, 2.0, 0.5], [-1.0, 1.0, 0.0], [+0.0, 1.0, 1.5]] + + # default params + scaler = MinMaxScaler() + X_trans = scaler.fit_transform(X) + X_expected_0_1 = [[0.0, 0.0, 0.5], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]] + assert_array_almost_equal(X_trans, X_expected_0_1) + X_trans_inv = scaler.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + + X_trans_new = scaler.transform(X_new) + X_expected_0_1_new = [[+0.0, 1.0, 0.500], [-1.0, 0.0, 0.083], [+0.0, 0.0, 1.333]] + assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2) + + # not default params + scaler = MinMaxScaler(feature_range=(1, 2)) + X_trans = scaler.fit_transform(X) + X_expected_1_2 = [[1.0, 1.0, 1.5], [1.0, 1.0, 1.0], [1.0, 1.0, 2.0]] + assert_array_almost_equal(X_trans, X_expected_1_2) + + # function interface + X_trans = minmax_scale(X) + assert_array_almost_equal(X_trans, X_expected_0_1) + X_trans = minmax_scale(X, feature_range=(1, 2)) + assert_array_almost_equal(X_trans, X_expected_1_2) + + +def test_minmax_scale_axis1(): + X = iris.data + X_trans = minmax_scale(X, axis=1) + assert_array_almost_equal(np.min(X_trans, axis=1), 0) + assert_array_almost_equal(np.max(X_trans, axis=1), 1) + + +def test_min_max_scaler_1d(): + # Test scaling of dataset along single axis + for X in [X_1row, X_1col, X_list_1row, X_list_1row]: + scaler = MinMaxScaler(copy=True) + X_scaled = scaler.fit(X).transform(X) + + if isinstance(X, list): + X = np.array(X) # cast only after scaling done + + if _check_dim_1axis(X) == 1: + assert_array_almost_equal(X_scaled.min(axis=0), 
np.zeros(n_features)) + assert_array_almost_equal(X_scaled.max(axis=0), np.zeros(n_features)) + else: + assert_array_almost_equal(X_scaled.min(axis=0), 0.0) + assert_array_almost_equal(X_scaled.max(axis=0), 1.0) + assert scaler.n_samples_seen_ == X.shape[0] + + # check inverse transform + X_scaled_back = scaler.inverse_transform(X_scaled) + assert_array_almost_equal(X_scaled_back, X) + + # Constant feature + X = np.ones((5, 1)) + scaler = MinMaxScaler() + X_scaled = scaler.fit(X).transform(X) + assert X_scaled.min() >= 0.0 + assert X_scaled.max() <= 1.0 + assert scaler.n_samples_seen_ == X.shape[0] + + # Function interface + X_1d = X_1row.ravel() + min_ = X_1d.min() + max_ = X_1d.max() + assert_array_almost_equal( + (X_1d - min_) / (max_ - min_), minmax_scale(X_1d, copy=True) + ) + + +@pytest.mark.parametrize("sample_weight", [True, None]) +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +def test_scaler_without_centering(sample_weight, sparse_container): + rng = np.random.RandomState(42) + X = rng.randn(4, 5) + X[:, 0] = 0.0 # first feature is always of zero + X_sparse = sparse_container(X) + + if sample_weight: + sample_weight = rng.rand(X.shape[0]) + + with pytest.raises(ValueError): + StandardScaler().fit(X_sparse) + + scaler = StandardScaler(with_mean=False).fit(X, sample_weight=sample_weight) + X_scaled = scaler.transform(X, copy=True) + assert not np.any(np.isnan(X_scaled)) + + scaler_sparse = StandardScaler(with_mean=False).fit( + X_sparse, sample_weight=sample_weight + ) + X_sparse_scaled = scaler_sparse.transform(X_sparse, copy=True) + assert not np.any(np.isnan(X_sparse_scaled.data)) + + assert_array_almost_equal(scaler.mean_, scaler_sparse.mean_) + assert_array_almost_equal(scaler.var_, scaler_sparse.var_) + assert_array_almost_equal(scaler.scale_, scaler_sparse.scale_) + assert_array_almost_equal(scaler.n_samples_seen_, scaler_sparse.n_samples_seen_) + + if sample_weight is None: + assert_array_almost_equal( + X_scaled.mean(axis=0), [0.0, -0.01, 2.24, -0.35, -0.78], 2 + ) + assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0]) + + X_sparse_scaled_mean, X_sparse_scaled_var = mean_variance_axis(X_sparse_scaled, 0) + assert_array_almost_equal(X_sparse_scaled_mean, X_scaled.mean(axis=0)) + assert_array_almost_equal(X_sparse_scaled_var, X_scaled.var(axis=0)) + + # Check that X has not been modified (copy) + assert X_scaled is not X + assert X_sparse_scaled is not X_sparse + + X_scaled_back = scaler.inverse_transform(X_scaled) + assert X_scaled_back is not X + assert X_scaled_back is not X_scaled + assert_array_almost_equal(X_scaled_back, X) + + X_sparse_scaled_back = scaler_sparse.inverse_transform(X_sparse_scaled) + assert X_sparse_scaled_back is not X_sparse + assert X_sparse_scaled_back is not X_sparse_scaled + assert_array_almost_equal(X_sparse_scaled_back.toarray(), X) + + if sparse_container in CSR_CONTAINERS: + null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) + X_null = null_transform.fit_transform(X_sparse) + assert_array_equal(X_null.data, X_sparse.data) + X_orig = null_transform.inverse_transform(X_null) + assert_array_equal(X_orig.data, X_sparse.data) + + +@pytest.mark.parametrize("with_mean", [True, False]) +@pytest.mark.parametrize("with_std", [True, False]) +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS) +def test_scaler_n_samples_seen_with_nan(with_mean, with_std, sparse_container): + X = np.array( + [[0, 1, 3], [np.nan, 6, 10], [5, 4, np.nan], [8, 0, 
np.nan]], dtype=np.float64 + ) + if sparse_container is not None: + X = sparse_container(X) + + if sparse.issparse(X) and with_mean: + pytest.skip("'with_mean=True' cannot be used with sparse matrix.") + + transformer = StandardScaler(with_mean=with_mean, with_std=with_std) + transformer.fit(X) + + assert_array_equal(transformer.n_samples_seen_, np.array([3, 4, 2])) + + +def _check_identity_scalers_attributes(scaler_1, scaler_2): + assert scaler_1.mean_ is scaler_2.mean_ is None + assert scaler_1.var_ is scaler_2.var_ is None + assert scaler_1.scale_ is scaler_2.scale_ is None + assert scaler_1.n_samples_seen_ == scaler_2.n_samples_seen_ + + +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +def test_scaler_return_identity(sparse_container): + # test that the scaler return identity when with_mean and with_std are + # False + X_dense = np.array([[0, 1, 3], [5, 6, 0], [8, 0, 10]], dtype=np.float64) + X_sparse = sparse_container(X_dense) + + transformer_dense = StandardScaler(with_mean=False, with_std=False) + X_trans_dense = transformer_dense.fit_transform(X_dense) + assert_allclose(X_trans_dense, X_dense) + + transformer_sparse = clone(transformer_dense) + X_trans_sparse = transformer_sparse.fit_transform(X_sparse) + assert_allclose_dense_sparse(X_trans_sparse, X_sparse) + + _check_identity_scalers_attributes(transformer_dense, transformer_sparse) + + transformer_dense.partial_fit(X_dense) + transformer_sparse.partial_fit(X_sparse) + _check_identity_scalers_attributes(transformer_dense, transformer_sparse) + + transformer_dense.fit(X_dense) + transformer_sparse.fit(X_sparse) + _check_identity_scalers_attributes(transformer_dense, transformer_sparse) + + +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +def test_scaler_int(sparse_container): + # test that scaler converts integer input to floating + # for both sparse and dense matrices + rng = np.random.RandomState(42) + X = rng.randint(20, size=(4, 5)) + X[:, 0] = 0 # first feature is always of zero + X_sparse = sparse_container(X) + + with warnings.catch_warnings(record=True): + scaler = StandardScaler(with_mean=False).fit(X) + X_scaled = scaler.transform(X, copy=True) + assert not np.any(np.isnan(X_scaled)) + + with warnings.catch_warnings(record=True): + scaler_sparse = StandardScaler(with_mean=False).fit(X_sparse) + X_sparse_scaled = scaler_sparse.transform(X_sparse, copy=True) + assert not np.any(np.isnan(X_sparse_scaled.data)) + + assert_array_almost_equal(scaler.mean_, scaler_sparse.mean_) + assert_array_almost_equal(scaler.var_, scaler_sparse.var_) + assert_array_almost_equal(scaler.scale_, scaler_sparse.scale_) + + assert_array_almost_equal( + X_scaled.mean(axis=0), [0.0, 1.109, 1.856, 21.0, 1.559], 2 + ) + assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0]) + + X_sparse_scaled_mean, X_sparse_scaled_std = mean_variance_axis( + X_sparse_scaled.astype(float), 0 + ) + assert_array_almost_equal(X_sparse_scaled_mean, X_scaled.mean(axis=0)) + assert_array_almost_equal(X_sparse_scaled_std, X_scaled.std(axis=0)) + + # Check that X has not been modified (copy) + assert X_scaled is not X + assert X_sparse_scaled is not X_sparse + + X_scaled_back = scaler.inverse_transform(X_scaled) + assert X_scaled_back is not X + assert X_scaled_back is not X_scaled + assert_array_almost_equal(X_scaled_back, X) + + X_sparse_scaled_back = scaler_sparse.inverse_transform(X_sparse_scaled) + assert X_sparse_scaled_back is not X_sparse + assert X_sparse_scaled_back is not 
X_sparse_scaled + assert_array_almost_equal(X_sparse_scaled_back.toarray(), X) + + if sparse_container in CSR_CONTAINERS: + null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) + with warnings.catch_warnings(record=True): + X_null = null_transform.fit_transform(X_sparse) + assert_array_equal(X_null.data, X_sparse.data) + X_orig = null_transform.inverse_transform(X_null) + assert_array_equal(X_orig.data, X_sparse.data) + + +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +def test_scaler_without_copy(sparse_container): + # Check that StandardScaler.fit does not change input + rng = np.random.RandomState(42) + X = rng.randn(4, 5) + X[:, 0] = 0.0 # first feature is always of zero + X_sparse = sparse_container(X) + + X_copy = X.copy() + StandardScaler(copy=False).fit(X) + assert_array_equal(X, X_copy) + + X_sparse_copy = X_sparse.copy() + StandardScaler(with_mean=False, copy=False).fit(X_sparse) + assert_array_equal(X_sparse.toarray(), X_sparse_copy.toarray()) + + +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +def test_scale_sparse_with_mean_raise_exception(sparse_container): + rng = np.random.RandomState(42) + X = rng.randn(4, 5) + X_sparse = sparse_container(X) + + # check scaling and fit with direct calls on sparse data + with pytest.raises(ValueError): + scale(X_sparse, with_mean=True) + with pytest.raises(ValueError): + StandardScaler(with_mean=True).fit(X_sparse) + + # check transform and inverse_transform after a fit on a dense array + scaler = StandardScaler(with_mean=True).fit(X) + with pytest.raises(ValueError): + scaler.transform(X_sparse) + + X_transformed_sparse = sparse_container(scaler.transform(X)) + with pytest.raises(ValueError): + scaler.inverse_transform(X_transformed_sparse) + + +def test_scale_input_finiteness_validation(): + # Check if non finite inputs raise ValueError + X = [[np.inf, 5, 6, 7, 8]] + with pytest.raises( + ValueError, match="Input contains infinity or a value too large" + ): + scale(X) + + +def test_robust_scaler_error_sparse(): + X_sparse = sparse.rand(1000, 10) + scaler = RobustScaler(with_centering=True) + err_msg = "Cannot center sparse matrices" + with pytest.raises(ValueError, match=err_msg): + scaler.fit(X_sparse) + + +@pytest.mark.parametrize("with_centering", [True, False]) +@pytest.mark.parametrize("with_scaling", [True, False]) +@pytest.mark.parametrize("X", [np.random.randn(10, 3), sparse.rand(10, 3, density=0.5)]) +def test_robust_scaler_attributes(X, with_centering, with_scaling): + # check consistent type of attributes + if with_centering and sparse.issparse(X): + pytest.skip("RobustScaler cannot center sparse matrix") + + scaler = RobustScaler(with_centering=with_centering, with_scaling=with_scaling) + scaler.fit(X) + + if with_centering: + assert isinstance(scaler.center_, np.ndarray) + else: + assert scaler.center_ is None + if with_scaling: + assert isinstance(scaler.scale_, np.ndarray) + else: + assert scaler.scale_ is None + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_robust_scaler_col_zero_sparse(csr_container): + # check that the scaler is working when there is not data materialized in a + # column of a sparse matrix + X = np.random.randn(10, 5) + X[:, 0] = 0 + X = csr_container(X) + + scaler = RobustScaler(with_centering=False) + scaler.fit(X) + assert scaler.scale_[0] == pytest.approx(1) + + X_trans = scaler.transform(X) + assert_allclose(X[:, [0]].toarray(), X_trans[:, [0]].toarray()) + + +def 
test_robust_scaler_2d_arrays(): + # Test robust scaling of 2d array along first axis + rng = np.random.RandomState(0) + X = rng.randn(4, 5) + X[:, 0] = 0.0 # first feature is always of zero + + scaler = RobustScaler() + X_scaled = scaler.fit(X).transform(X) + + assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0]) + assert_array_almost_equal(X_scaled.std(axis=0)[0], 0) + + +@pytest.mark.parametrize("density", [0, 0.05, 0.1, 0.5, 1]) +@pytest.mark.parametrize("strictly_signed", ["positive", "negative", "zeros", None]) +def test_robust_scaler_equivalence_dense_sparse(density, strictly_signed): + # Check the equivalence of the fitting with dense and sparse matrices + X_sparse = sparse.rand(1000, 5, density=density).tocsc() + if strictly_signed == "positive": + X_sparse.data = np.abs(X_sparse.data) + elif strictly_signed == "negative": + X_sparse.data = -np.abs(X_sparse.data) + elif strictly_signed == "zeros": + X_sparse.data = np.zeros(X_sparse.data.shape, dtype=np.float64) + X_dense = X_sparse.toarray() + + scaler_sparse = RobustScaler(with_centering=False) + scaler_dense = RobustScaler(with_centering=False) + + scaler_sparse.fit(X_sparse) + scaler_dense.fit(X_dense) + + assert_allclose(scaler_sparse.scale_, scaler_dense.scale_) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_robust_scaler_transform_one_row_csr(csr_container): + # Check RobustScaler on transforming csr matrix with one row + rng = np.random.RandomState(0) + X = rng.randn(4, 5) + single_row = np.array([[0.1, 1.0, 2.0, 0.0, -1.0]]) + scaler = RobustScaler(with_centering=False) + scaler = scaler.fit(X) + row_trans = scaler.transform(csr_container(single_row)) + row_expected = single_row / scaler.scale_ + assert_array_almost_equal(row_trans.toarray(), row_expected) + row_scaled_back = scaler.inverse_transform(row_trans) + assert_array_almost_equal(single_row, row_scaled_back.toarray()) + + +def test_robust_scaler_iris(): + X = iris.data + scaler = RobustScaler() + X_trans = scaler.fit_transform(X) + assert_array_almost_equal(np.median(X_trans, axis=0), 0) + X_trans_inv = scaler.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + q = np.percentile(X_trans, q=(25, 75), axis=0) + iqr = q[1] - q[0] + assert_array_almost_equal(iqr, 1) + + +def test_robust_scaler_iris_quantiles(): + X = iris.data + scaler = RobustScaler(quantile_range=(10, 90)) + X_trans = scaler.fit_transform(X) + assert_array_almost_equal(np.median(X_trans, axis=0), 0) + X_trans_inv = scaler.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + q = np.percentile(X_trans, q=(10, 90), axis=0) + q_range = q[1] - q[0] + assert_array_almost_equal(q_range, 1) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_quantile_transform_iris(csc_container): + X = iris.data + # uniform output distribution + transformer = QuantileTransformer(n_quantiles=30) + X_trans = transformer.fit_transform(X) + X_trans_inv = transformer.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + # normal output distribution + transformer = QuantileTransformer(n_quantiles=30, output_distribution="normal") + X_trans = transformer.fit_transform(X) + X_trans_inv = transformer.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + # make sure it is possible to take the inverse of a sparse matrix + # which contain negative value; this is the case in the iris dataset + X_sparse = csc_container(X) + X_sparse_tran = transformer.fit_transform(X_sparse) + X_sparse_tran_inv = 
transformer.inverse_transform(X_sparse_tran) + assert_array_almost_equal(X_sparse.toarray(), X_sparse_tran_inv.toarray()) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_quantile_transform_check_error(csc_container): + X = np.transpose( + [ + [0, 25, 50, 0, 0, 0, 75, 0, 0, 100], + [2, 4, 0, 0, 6, 8, 0, 10, 0, 0], + [0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1], + ] + ) + X = csc_container(X) + X_neg = np.transpose( + [ + [0, 25, 50, 0, 0, 0, 75, 0, 0, 100], + [-2, 4, 0, 0, 6, 8, 0, 10, 0, 0], + [0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1], + ] + ) + X_neg = csc_container(X_neg) + + err_msg = ( + "The number of quantiles cannot be greater than " + "the number of samples used. Got 1000 quantiles " + "and 10 samples." + ) + with pytest.raises(ValueError, match=err_msg): + QuantileTransformer(subsample=10).fit(X) + + transformer = QuantileTransformer(n_quantiles=10) + err_msg = "QuantileTransformer only accepts non-negative sparse matrices." + with pytest.raises(ValueError, match=err_msg): + transformer.fit(X_neg) + transformer.fit(X) + err_msg = "QuantileTransformer only accepts non-negative sparse matrices." + with pytest.raises(ValueError, match=err_msg): + transformer.transform(X_neg) + + X_bad_feat = np.transpose( + [[0, 25, 50, 0, 0, 0, 75, 0, 0, 100], [0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]] + ) + err_msg = ( + "X has 2 features, but QuantileTransformer is expecting 3 features as input." + ) + with pytest.raises(ValueError, match=err_msg): + transformer.inverse_transform(X_bad_feat) + + transformer = QuantileTransformer(n_quantiles=10).fit(X) + # check that an error is raised if input is scalar + with pytest.raises(ValueError, match="Expected 2D array, got scalar array instead"): + transformer.transform(10) + # check that a warning is raised is n_quantiles > n_samples + transformer = QuantileTransformer(n_quantiles=100) + warn_msg = "n_quantiles is set to n_samples" + with pytest.warns(UserWarning, match=warn_msg) as record: + transformer.fit(X) + assert len(record) == 1 + assert transformer.n_quantiles_ == X.shape[0] + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_quantile_transform_sparse_ignore_zeros(csc_container): + X = np.array([[0, 1], [0, 0], [0, 2], [0, 2], [0, 1]]) + X_sparse = csc_container(X) + transformer = QuantileTransformer(ignore_implicit_zeros=True, n_quantiles=5) + + # dense case -> warning raise + warning_message = ( + "'ignore_implicit_zeros' takes effect" + " only with sparse matrix. This parameter has no" + " effect." 
+ ) + with pytest.warns(UserWarning, match=warning_message): + transformer.fit(X) + + X_expected = np.array([[0, 0], [0, 0], [0, 1], [0, 1], [0, 0]]) + X_trans = transformer.fit_transform(X_sparse) + assert_almost_equal(X_expected, X_trans.toarray()) + + # consider the case where sparse entries are missing values and user-given + # zeros are to be considered + X_data = np.array([0, 0, 1, 0, 2, 2, 1, 0, 1, 2, 0]) + X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]) + X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8]) + X_sparse = csc_container((X_data, (X_row, X_col))) + X_trans = transformer.fit_transform(X_sparse) + X_expected = np.array( + [ + [0.0, 0.5], + [0.0, 0.0], + [0.0, 1.0], + [0.0, 1.0], + [0.0, 0.5], + [0.0, 0.0], + [0.0, 0.5], + [0.0, 1.0], + [0.0, 0.0], + ] + ) + assert_almost_equal(X_expected, X_trans.toarray()) + + transformer = QuantileTransformer(ignore_implicit_zeros=True, n_quantiles=5) + X_data = np.array([-1, -1, 1, 0, 0, 0, 1, -1, 1]) + X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1]) + X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6]) + X_sparse = csc_container((X_data, (X_row, X_col))) + X_trans = transformer.fit_transform(X_sparse) + X_expected = np.array( + [[0, 1], [0, 0.375], [0, 0.375], [0, 0.375], [0, 1], [0, 0], [0, 1]] + ) + assert_almost_equal(X_expected, X_trans.toarray()) + assert_almost_equal( + X_sparse.toarray(), transformer.inverse_transform(X_trans).toarray() + ) + + # check in conjunction with subsampling + transformer = QuantileTransformer( + ignore_implicit_zeros=True, n_quantiles=5, subsample=8, random_state=0 + ) + X_trans = transformer.fit_transform(X_sparse) + assert_almost_equal(X_expected, X_trans.toarray()) + assert_almost_equal( + X_sparse.toarray(), transformer.inverse_transform(X_trans).toarray() + ) + + +def test_quantile_transform_dense_toy(): + X = np.array( + [[0, 2, 2.6], [25, 4, 4.1], [50, 6, 2.3], [75, 8, 9.5], [100, 10, 0.1]] + ) + + transformer = QuantileTransformer(n_quantiles=5) + transformer.fit(X) + + # using a uniform output, each entry of X should be map between 0 and 1 + # and equally spaced + X_trans = transformer.fit_transform(X) + X_expected = np.tile(np.linspace(0, 1, num=5), (3, 1)).T + assert_almost_equal(np.sort(X_trans, axis=0), X_expected) + + X_test = np.array( + [ + [-1, 1, 0], + [101, 11, 10], + ] + ) + X_expected = np.array( + [ + [0, 0, 0], + [1, 1, 1], + ] + ) + assert_array_almost_equal(transformer.transform(X_test), X_expected) + + X_trans_inv = transformer.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + + +def test_quantile_transform_subsampling(): + # Test that subsampling the input yield to a consistent results We check + # that the computed quantiles are almost mapped to a [0, 1] vector where + # values are equally spaced. The infinite norm is checked to be smaller + # than a given threshold. This is repeated 5 times. 
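+ # Note: for a single Uniform(0, 1) feature, `quantiles_` has shape
+ # (n_quantiles, n_features) and should approximate
+ # np.linspace(0, 1, n_quantiles), so the infinity norm of the difference
+ # computed below measures the error introduced by fitting on a random
+ # subsample.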
+ + # dense support + n_samples = 1000000 + n_quantiles = 1000 + X = np.sort(np.random.sample((n_samples, 1)), axis=0) + ROUND = 5 + inf_norm_arr = [] + for random_state in range(ROUND): + transformer = QuantileTransformer( + random_state=random_state, + n_quantiles=n_quantiles, + subsample=n_samples // 10, + ) + transformer.fit(X) + diff = np.linspace(0, 1, n_quantiles) - np.ravel(transformer.quantiles_) + inf_norm = np.max(np.abs(diff)) + assert inf_norm < 1e-2 + inf_norm_arr.append(inf_norm) + # each random subsampling yield a unique approximation to the expected + # linspace CDF + assert len(np.unique(inf_norm_arr)) == len(inf_norm_arr) + + # sparse support + + X = sparse.rand(n_samples, 1, density=0.99, format="csc", random_state=0) + inf_norm_arr = [] + for random_state in range(ROUND): + transformer = QuantileTransformer( + random_state=random_state, + n_quantiles=n_quantiles, + subsample=n_samples // 10, + ) + transformer.fit(X) + diff = np.linspace(0, 1, n_quantiles) - np.ravel(transformer.quantiles_) + inf_norm = np.max(np.abs(diff)) + assert inf_norm < 1e-1 + inf_norm_arr.append(inf_norm) + # each random subsampling yield a unique approximation to the expected + # linspace CDF + assert len(np.unique(inf_norm_arr)) == len(inf_norm_arr) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_quantile_transform_sparse_toy(csc_container): + X = np.array( + [ + [0.0, 2.0, 0.0], + [25.0, 4.0, 0.0], + [50.0, 0.0, 2.6], + [0.0, 0.0, 4.1], + [0.0, 6.0, 0.0], + [0.0, 8.0, 0.0], + [75.0, 0.0, 2.3], + [0.0, 10.0, 0.0], + [0.0, 0.0, 9.5], + [100.0, 0.0, 0.1], + ] + ) + + X = csc_container(X) + + transformer = QuantileTransformer(n_quantiles=10) + transformer.fit(X) + + X_trans = transformer.fit_transform(X) + assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.0) + assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.0) + + X_trans_inv = transformer.inverse_transform(X_trans) + assert_array_almost_equal(X.toarray(), X_trans_inv.toarray()) + + transformer_dense = QuantileTransformer(n_quantiles=10).fit(X.toarray()) + + X_trans = transformer_dense.transform(X) + assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.0) + assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.0) + + X_trans_inv = transformer_dense.inverse_transform(X_trans) + assert_array_almost_equal(X.toarray(), X_trans_inv.toarray()) + + +def test_quantile_transform_axis1(): + X = np.array([[0, 25, 50, 75, 100], [2, 4, 6, 8, 10], [2.6, 4.1, 2.3, 9.5, 0.1]]) + + X_trans_a0 = quantile_transform(X.T, axis=0, n_quantiles=5) + X_trans_a1 = quantile_transform(X, axis=1, n_quantiles=5) + assert_array_almost_equal(X_trans_a0, X_trans_a1.T) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_quantile_transform_bounds(csc_container): + # Lower and upper bounds are manually mapped. We checked that in the case + # of a constant feature and binary feature, the bounds are properly mapped. 
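+ # Concretely, the assertion below checks that the binary feature
+ # [0, 0, 1] is mapped onto itself (its lower and upper bounds, 0 and 1,
+ # map to 0 and 1) and that the constant feature [0, 0, 0] maps to 0
+ # everywhere, for both the dense and the sparse input.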
+ X_dense = np.array([[0, 0], [0, 0], [1, 0]]) + X_sparse = csc_container(X_dense) + + # check sparse and dense are consistent + X_trans = QuantileTransformer(n_quantiles=3, random_state=0).fit_transform(X_dense) + assert_array_almost_equal(X_trans, X_dense) + X_trans_sp = QuantileTransformer(n_quantiles=3, random_state=0).fit_transform( + X_sparse + ) + assert_array_almost_equal(X_trans_sp.toarray(), X_dense) + assert_array_almost_equal(X_trans, X_trans_sp.toarray()) + + # check the consistency of the bounds by learning on 1 matrix + # and transforming another + X = np.array([[0, 1], [0, 0.5], [1, 0]]) + X1 = np.array([[0, 0.1], [0, 0.5], [1, 0.1]]) + transformer = QuantileTransformer(n_quantiles=3).fit(X) + X_trans = transformer.transform(X1) + assert_array_almost_equal(X_trans, X1) + + # check that values outside of the range learned will be mapped properly. + X = np.random.random((1000, 1)) + transformer = QuantileTransformer() + transformer.fit(X) + assert transformer.transform([[-10]]) == transformer.transform([[np.min(X)]]) + assert transformer.transform([[10]]) == transformer.transform([[np.max(X)]]) + assert transformer.inverse_transform([[-10]]) == transformer.inverse_transform( + [[np.min(transformer.references_)]] + ) + assert transformer.inverse_transform([[10]]) == transformer.inverse_transform( + [[np.max(transformer.references_)]] + ) + + +def test_quantile_transform_and_inverse(): + X_1 = iris.data + X_2 = np.array([[0.0], [BOUNDS_THRESHOLD / 10], [1.5], [2], [3], [3], [4]]) + for X in [X_1, X_2]: + transformer = QuantileTransformer(n_quantiles=1000, random_state=0) + X_trans = transformer.fit_transform(X) + X_trans_inv = transformer.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv, decimal=9) + + +def test_quantile_transform_nan(): + X = np.array([[np.nan, 0, 0, 1], [np.nan, np.nan, 0, 0.5], [np.nan, 1, 1, 0]]) + + transformer = QuantileTransformer(n_quantiles=10, random_state=42) + transformer.fit_transform(X) + + # check that the quantile of the first column is all NaN + assert np.isnan(transformer.quantiles_[:, 0]).all() + # all other column should not contain NaN + assert not np.isnan(transformer.quantiles_[:, 1:]).any() + + +@pytest.mark.parametrize("array_type", ["array", "sparse"]) +def test_quantile_transformer_sorted_quantiles(array_type): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/15733 + # Taken from upstream bug report: + # https://github.com/numpy/numpy/issues/14685 + X = np.array([0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 1, 1, 9, 9, 9, 8, 8, 7] * 10) + X = 0.1 * X.reshape(-1, 1) + X = _convert_container(X, array_type) + + n_quantiles = 100 + qt = QuantileTransformer(n_quantiles=n_quantiles).fit(X) + + # Check that the estimated quantile thresholds are monotically + # increasing: + quantiles = qt.quantiles_[:, 0] + assert len(quantiles) == 100 + assert all(np.diff(quantiles) >= 0) + + +def test_robust_scaler_invalid_range(): + for range_ in [ + (-1, 90), + (-2, -3), + (10, 101), + (100.5, 101), + (90, 50), + ]: + scaler = RobustScaler(quantile_range=range_) + + with pytest.raises(ValueError, match=r"Invalid quantile range: \("): + scaler.fit(iris.data) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_scale_function_without_centering(csr_container): + rng = np.random.RandomState(42) + X = rng.randn(4, 5) + X[:, 0] = 0.0 # first feature is always of zero + X_csr = csr_container(X) + + X_scaled = scale(X, with_mean=False) + assert not np.any(np.isnan(X_scaled)) + + X_csr_scaled = 
scale(X_csr, with_mean=False) + assert not np.any(np.isnan(X_csr_scaled.data)) + + # test csc has same outcome + X_csc_scaled = scale(X_csr.tocsc(), with_mean=False) + assert_array_almost_equal(X_scaled, X_csc_scaled.toarray()) + + # raises value error on axis != 0 + with pytest.raises(ValueError): + scale(X_csr, with_mean=False, axis=1) + + assert_array_almost_equal( + X_scaled.mean(axis=0), [0.0, -0.01, 2.24, -0.35, -0.78], 2 + ) + assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0]) + # Check that X has not been copied + assert X_scaled is not X + + X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0) + assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) + assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) + + # null scale + X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True) + assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray()) + + +def test_robust_scale_axis1(): + X = iris.data + X_trans = robust_scale(X, axis=1) + assert_array_almost_equal(np.median(X_trans, axis=1), 0) + q = np.percentile(X_trans, q=(25, 75), axis=1) + iqr = q[1] - q[0] + assert_array_almost_equal(iqr, 1) + + +def test_robust_scale_1d_array(): + X = iris.data[:, 1] + X_trans = robust_scale(X) + assert_array_almost_equal(np.median(X_trans), 0) + q = np.percentile(X_trans, q=(25, 75)) + iqr = q[1] - q[0] + assert_array_almost_equal(iqr, 1) + + +def test_robust_scaler_zero_variance_features(): + # Check RobustScaler on toy data with zero variance features + X = [[0.0, 1.0, +0.5], [0.0, 1.0, -0.1], [0.0, 1.0, +1.1]] + + scaler = RobustScaler() + X_trans = scaler.fit_transform(X) + + # NOTE: for such a small sample size, what we expect in the third column + # depends HEAVILY on the method used to calculate quantiles. The values + # here were calculated to fit the quantiles produces by np.percentile + # using numpy 1.9 Calculating quantiles with + # scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles + # would yield very different results! 
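+ # Worked example for the third column [0.5, -0.1, 1.1]: its median is
+ # 0.5 and, with numpy's default linear interpolation, its 25th/75th
+ # percentiles are 0.2/0.8, giving an IQR of 0.6; the scaled values are
+ # therefore (x - 0.5) / 0.6 = [0, -1, 1], which is the third column of
+ # X_expected below. The two zero-variance columns end up at 0 after
+ # centering, and their zero spread is replaced by a unit scale.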
+ X_expected = [[0.0, 0.0, +0.0], [0.0, 0.0, -1.0], [0.0, 0.0, +1.0]] + assert_array_almost_equal(X_trans, X_expected) + X_trans_inv = scaler.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + + # make sure new data gets transformed correctly + X_new = [[+0.0, 2.0, 0.5], [-1.0, 1.0, 0.0], [+0.0, 1.0, 1.5]] + X_trans_new = scaler.transform(X_new) + X_expected_new = [[+0.0, 1.0, +0.0], [-1.0, 0.0, -0.83333], [+0.0, 0.0, +1.66667]] + assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3) + + +def test_robust_scaler_unit_variance(): + # Check RobustScaler with unit_variance=True on standard normal data with + # outliers + rng = np.random.RandomState(42) + X = rng.randn(1000000, 1) + X_with_outliers = np.vstack([X, np.ones((100, 1)) * 100, np.ones((100, 1)) * -100]) + + quantile_range = (1, 99) + robust_scaler = RobustScaler(quantile_range=quantile_range, unit_variance=True).fit( + X_with_outliers + ) + X_trans = robust_scaler.transform(X) + + assert robust_scaler.center_ == pytest.approx(0, abs=1e-3) + assert robust_scaler.scale_ == pytest.approx(1, abs=1e-2) + assert X_trans.std() == pytest.approx(1, abs=1e-2) + + +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +def test_maxabs_scaler_zero_variance_features(sparse_container): + # Check MaxAbsScaler on toy data with zero variance features + X = [[0.0, 1.0, +0.5], [0.0, 1.0, -0.3], [0.0, 1.0, +1.5], [0.0, 0.0, +0.0]] + + scaler = MaxAbsScaler() + X_trans = scaler.fit_transform(X) + X_expected = [ + [0.0, 1.0, 1.0 / 3.0], + [0.0, 1.0, -0.2], + [0.0, 1.0, 1.0], + [0.0, 0.0, 0.0], + ] + assert_array_almost_equal(X_trans, X_expected) + X_trans_inv = scaler.inverse_transform(X_trans) + assert_array_almost_equal(X, X_trans_inv) + + # make sure new data gets transformed correctly + X_new = [[+0.0, 2.0, 0.5], [-1.0, 1.0, 0.0], [+0.0, 1.0, 1.5]] + X_trans_new = scaler.transform(X_new) + X_expected_new = [[+0.0, 2.0, 1.0 / 3.0], [-1.0, 1.0, 0.0], [+0.0, 1.0, 1.0]] + + assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2) + + # function interface + X_trans = maxabs_scale(X) + assert_array_almost_equal(X_trans, X_expected) + + # sparse data + X_sparse = sparse_container(X) + X_trans_sparse = scaler.fit_transform(X_sparse) + X_expected = [ + [0.0, 1.0, 1.0 / 3.0], + [0.0, 1.0, -0.2], + [0.0, 1.0, 1.0], + [0.0, 0.0, 0.0], + ] + assert_array_almost_equal(X_trans_sparse.toarray(), X_expected) + X_trans_sparse_inv = scaler.inverse_transform(X_trans_sparse) + assert_array_almost_equal(X, X_trans_sparse_inv.toarray()) + + +def test_maxabs_scaler_large_negative_value(): + # Check MaxAbsScaler on toy data with a large negative value + X = [ + [0.0, 1.0, +0.5, -1.0], + [0.0, 1.0, -0.3, -0.5], + [0.0, 1.0, -100.0, 0.0], + [0.0, 0.0, +0.0, -2.0], + ] + + scaler = MaxAbsScaler() + X_trans = scaler.fit_transform(X) + X_expected = [ + [0.0, 1.0, 0.005, -0.5], + [0.0, 1.0, -0.003, -0.25], + [0.0, 1.0, -1.0, 0.0], + [0.0, 0.0, 0.0, -1.0], + ] + assert_array_almost_equal(X_trans, X_expected) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_maxabs_scaler_transform_one_row_csr(csr_container): + # Check MaxAbsScaler on transforming csr matrix with one row + X = csr_container([[0.5, 1.0, 1.0]]) + scaler = MaxAbsScaler() + scaler = scaler.fit(X) + X_trans = scaler.transform(X) + X_expected = csr_container([[1.0, 1.0, 1.0]]) + assert_array_almost_equal(X_trans.toarray(), X_expected.toarray()) + X_scaled_back = scaler.inverse_transform(X_trans) + 
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray()) + + +def test_maxabs_scaler_1d(): + # Test scaling of dataset along single axis + for X in [X_1row, X_1col, X_list_1row, X_list_1row]: + scaler = MaxAbsScaler(copy=True) + X_scaled = scaler.fit(X).transform(X) + + if isinstance(X, list): + X = np.array(X) # cast only after scaling done + + if _check_dim_1axis(X) == 1: + assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), np.ones(n_features)) + else: + assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.0) + assert scaler.n_samples_seen_ == X.shape[0] + + # check inverse transform + X_scaled_back = scaler.inverse_transform(X_scaled) + assert_array_almost_equal(X_scaled_back, X) + + # Constant feature + X = np.ones((5, 1)) + scaler = MaxAbsScaler() + X_scaled = scaler.fit(X).transform(X) + assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.0) + assert scaler.n_samples_seen_ == X.shape[0] + + # function interface + X_1d = X_1row.ravel() + max_abs = np.abs(X_1d).max() + assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_maxabs_scaler_partial_fit(csr_container): + # Test if partial_fit run over many batches of size 1 and 50 + # gives the same results as fit + X = X_2d[:100, :] + n = X.shape[0] + + for chunk_size in [1, 2, 50, n, n + 42]: + # Test mean at the end of the process + scaler_batch = MaxAbsScaler().fit(X) + + scaler_incr = MaxAbsScaler() + scaler_incr_csr = MaxAbsScaler() + scaler_incr_csc = MaxAbsScaler() + for batch in gen_batches(n, chunk_size): + scaler_incr = scaler_incr.partial_fit(X[batch]) + X_csr = csr_container(X[batch]) + scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr) + X_csc = csr_container(X[batch]) + scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc) + + assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_) + assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr_csr.max_abs_) + assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr_csc.max_abs_) + assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_ + assert scaler_batch.n_samples_seen_ == scaler_incr_csr.n_samples_seen_ + assert scaler_batch.n_samples_seen_ == scaler_incr_csc.n_samples_seen_ + assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) + assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_) + assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_) + assert_array_almost_equal(scaler_batch.transform(X), scaler_incr.transform(X)) + + # Test std after 1 step + batch0 = slice(0, chunk_size) + scaler_batch = MaxAbsScaler().fit(X[batch0]) + scaler_incr = MaxAbsScaler().partial_fit(X[batch0]) + + assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_) + assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_ + assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) + assert_array_almost_equal(scaler_batch.transform(X), scaler_incr.transform(X)) + + # Test std until the end of partial fits, and + scaler_batch = MaxAbsScaler().fit(X) + scaler_incr = MaxAbsScaler() # Clean estimator + for i, batch in enumerate(gen_batches(n, chunk_size)): + scaler_incr = scaler_incr.partial_fit(X[batch]) + assert_correct_incr( + i, + batch_start=batch.start, + batch_stop=batch.stop, + n=n, + chunk_size=chunk_size, + n_samples_seen=scaler_incr.n_samples_seen_, + ) + + +def check_normalizer(norm, X_norm): + """ + Convenient checking function for `test_normalizer_l1_l2_max` and + 
`test_normalizer_l1_l2_max_non_csr` + """ + if norm == "l1": + row_sums = np.abs(X_norm).sum(axis=1) + for i in range(3): + assert_almost_equal(row_sums[i], 1.0) + assert_almost_equal(row_sums[3], 0.0) + elif norm == "l2": + for i in range(3): + assert_almost_equal(la.norm(X_norm[i]), 1.0) + assert_almost_equal(la.norm(X_norm[3]), 0.0) + elif norm == "max": + row_maxs = abs(X_norm).max(axis=1) + for i in range(3): + assert_almost_equal(row_maxs[i], 1.0) + assert_almost_equal(row_maxs[3], 0.0) + + +@pytest.mark.parametrize("norm", ["l1", "l2", "max"]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_normalizer_l1_l2_max(norm, csr_container): + rng = np.random.RandomState(0) + X_dense = rng.randn(4, 5) + X_sparse_unpruned = csr_container(X_dense) + + # set the row number 3 to zero + X_dense[3, :] = 0.0 + + # set the row number 3 to zero without pruning (can happen in real life) + indptr_3 = X_sparse_unpruned.indptr[3] + indptr_4 = X_sparse_unpruned.indptr[4] + X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 + + # build the pruned variant using the regular constructor + X_sparse_pruned = csr_container(X_dense) + + # check inputs that support the no-copy optim + for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): + normalizer = Normalizer(norm=norm, copy=True) + X_norm1 = normalizer.transform(X) + assert X_norm1 is not X + X_norm1 = toarray(X_norm1) + + normalizer = Normalizer(norm=norm, copy=False) + X_norm2 = normalizer.transform(X) + assert X_norm2 is X + X_norm2 = toarray(X_norm2) + + for X_norm in (X_norm1, X_norm2): + check_normalizer(norm, X_norm) + + +@pytest.mark.parametrize("norm", ["l1", "l2", "max"]) +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + LIL_CONTAINERS +) +def test_normalizer_l1_l2_max_non_csr(norm, sparse_container): + rng = np.random.RandomState(0) + X_dense = rng.randn(4, 5) + + # set the row number 3 to zero + X_dense[3, :] = 0.0 + + X = sparse_container(X_dense) + X_norm = Normalizer(norm=norm, copy=False).transform(X) + + assert X_norm is not X + assert sparse.issparse(X_norm) and X_norm.format == "csr" + + X_norm = toarray(X_norm) + check_normalizer(norm, X_norm) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_normalizer_max_sign(csr_container): + # check that we normalize by a positive number even for negative data + rng = np.random.RandomState(0) + X_dense = rng.randn(4, 5) + # set the row number 3 to zero + X_dense[3, :] = 0.0 + # check for mixed data where the value with + # largest magnitude is negative + X_dense[2, abs(X_dense[2, :]).argmax()] *= -1 + X_all_neg = -np.abs(X_dense) + X_all_neg_sparse = csr_container(X_all_neg) + + for X in (X_dense, X_all_neg, X_all_neg_sparse): + normalizer = Normalizer(norm="max") + X_norm = normalizer.transform(X) + assert X_norm is not X + X_norm = toarray(X_norm) + assert_array_equal(np.sign(X_norm), np.sign(toarray(X))) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_normalize(csr_container): + # Test normalize function + # Only tests functionality not used by the tests for Normalizer. 
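+ # normalize() scales each sample (each row, with the default axis=1) to
+ # unit norm; with return_norm=True it also returns the per-row norms,
+ # e.g. for the row [3, 0, 4] the l1 norm is 7, the l2 norm is 5 and the
+ # max norm is 4, matching the expected arrays further down.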
+ X = np.random.RandomState(37).randn(3, 2) + assert_array_equal(normalize(X, copy=False), normalize(X.T, axis=0, copy=False).T) + + rs = np.random.RandomState(0) + X_dense = rs.randn(10, 5) + X_sparse = csr_container(X_dense) + ones = np.ones((10)) + for X in (X_dense, X_sparse): + for dtype in (np.float32, np.float64): + for norm in ("l1", "l2"): + X = X.astype(dtype) + X_norm = normalize(X, norm=norm) + assert X_norm.dtype == dtype + + X_norm = toarray(X_norm) + if norm == "l1": + row_sums = np.abs(X_norm).sum(axis=1) + else: + X_norm_squared = X_norm**2 + row_sums = X_norm_squared.sum(axis=1) + + assert_array_almost_equal(row_sums, ones) + + # Test return_norm + X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]]) + for norm in ("l1", "l2", "max"): + _, norms = normalize(X_dense, norm=norm, return_norm=True) + if norm == "l1": + assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0])) + elif norm == "l2": + assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127])) + else: + assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0])) + + X_sparse = csr_container(X_dense) + for norm in ("l1", "l2"): + with pytest.raises(NotImplementedError): + normalize(X_sparse, norm=norm, return_norm=True) + _, norms = normalize(X_sparse, norm="max", return_norm=True) + assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0])) + + +@pytest.mark.parametrize( + "constructor", [np.array, list] + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_binarizer(constructor): + X_ = np.array([[1, 0, 5], [2, 3, -1]]) + X = constructor(X_.copy()) + + binarizer = Binarizer(threshold=2.0, copy=True) + X_bin = toarray(binarizer.transform(X)) + assert np.sum(X_bin == 0) == 4 + assert np.sum(X_bin == 1) == 2 + X_bin = binarizer.transform(X) + assert sparse.issparse(X) == sparse.issparse(X_bin) + + binarizer = Binarizer(copy=True).fit(X) + X_bin = toarray(binarizer.transform(X)) + assert X_bin is not X + assert np.sum(X_bin == 0) == 2 + assert np.sum(X_bin == 1) == 4 + + binarizer = Binarizer(copy=True) + X_bin = binarizer.transform(X) + assert X_bin is not X + X_bin = toarray(X_bin) + assert np.sum(X_bin == 0) == 2 + assert np.sum(X_bin == 1) == 4 + + binarizer = Binarizer(copy=False) + X_bin = binarizer.transform(X) + if constructor is not list: + assert X_bin is X + + binarizer = Binarizer(copy=False) + X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64) + X_bin = binarizer.transform(X_float) + if constructor is not list: + assert X_bin is X_float + + X_bin = toarray(X_bin) + assert np.sum(X_bin == 0) == 2 + assert np.sum(X_bin == 1) == 4 + + binarizer = Binarizer(threshold=-0.5, copy=True) + if constructor in (np.array, list): + X = constructor(X_.copy()) + + X_bin = toarray(binarizer.transform(X)) + assert np.sum(X_bin == 0) == 1 + assert np.sum(X_bin == 1) == 5 + X_bin = binarizer.transform(X) + + # Cannot use threshold < 0 for sparse + if constructor in CSC_CONTAINERS: + with pytest.raises(ValueError): + binarizer.transform(constructor(X)) + + +def test_center_kernel(): + # Test that KernelCenterer is equivalent to StandardScaler + # in feature space + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, 4)) + scaler = StandardScaler(with_std=False) + scaler.fit(X_fit) + X_fit_centered = scaler.transform(X_fit) + K_fit = np.dot(X_fit, X_fit.T) + + # center fit time matrix + centerer = KernelCenterer() + K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T) + K_fit_centered2 = centerer.fit_transform(K_fit) + assert_array_almost_equal(K_fit_centered, K_fit_centered2) 
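+ # In other words, centering the linear kernel K_fit = X_fit X_fit^T with
+ # KernelCenterer is equivalent to mean-centering the features first and
+ # then taking the linear kernel, (X_fit - mean)(X_fit - mean)^T; the same
+ # equivalence is checked below for the test-time kernel K_pred.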
+ + # center predict time matrix + X_pred = rng.random_sample((2, 4)) + K_pred = np.dot(X_pred, X_fit.T) + X_pred_centered = scaler.transform(X_pred) + K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T) + K_pred_centered2 = centerer.transform(K_pred) + assert_array_almost_equal(K_pred_centered, K_pred_centered2) + + # check the results coherence with the method proposed in: + # B. Schölkopf, A. Smola, and K.R. Müller, + # "Nonlinear component analysis as a kernel eigenvalue problem" + # equation (B.3) + + # K_centered3 = (I - 1_M) K (I - 1_M) + # = K - 1_M K - K 1_M + 1_M K 1_M + ones_M = np.ones_like(K_fit) / K_fit.shape[0] + K_fit_centered3 = K_fit - ones_M @ K_fit - K_fit @ ones_M + ones_M @ K_fit @ ones_M + assert_allclose(K_fit_centered, K_fit_centered3) + + # K_test_centered3 = (K_test - 1'_M K)(I - 1_M) + # = K_test - 1'_M K - K_test 1_M + 1'_M K 1_M + ones_prime_M = np.ones_like(K_pred) / K_fit.shape[0] + K_pred_centered3 = ( + K_pred - ones_prime_M @ K_fit - K_pred @ ones_M + ones_prime_M @ K_fit @ ones_M + ) + assert_allclose(K_pred_centered, K_pred_centered3) + + +def test_kernelcenterer_non_linear_kernel(): + """Check kernel centering for non-linear kernel.""" + rng = np.random.RandomState(0) + X, X_test = rng.randn(100, 50), rng.randn(20, 50) + + def phi(X): + """Our mapping function phi.""" + return np.vstack( + [ + np.clip(X, a_min=0, a_max=None), + -np.clip(X, a_min=None, a_max=0), + ] + ) + + phi_X = phi(X) + phi_X_test = phi(X_test) + + # centered the projection + scaler = StandardScaler(with_std=False) + phi_X_center = scaler.fit_transform(phi_X) + phi_X_test_center = scaler.transform(phi_X_test) + + # create the different kernel + K = phi_X @ phi_X.T + K_test = phi_X_test @ phi_X.T + K_center = phi_X_center @ phi_X_center.T + K_test_center = phi_X_test_center @ phi_X_center.T + + kernel_centerer = KernelCenterer() + kernel_centerer.fit(K) + + assert_allclose(kernel_centerer.transform(K), K_center) + assert_allclose(kernel_centerer.transform(K_test), K_test_center) + + # check the results coherence with the method proposed in: + # B. Schölkopf, A. Smola, and K.R. Müller, + # "Nonlinear component analysis as a kernel eigenvalue problem" + # equation (B.3) + + # K_centered = (I - 1_M) K (I - 1_M) + # = K - 1_M K - K 1_M + 1_M K 1_M + ones_M = np.ones_like(K) / K.shape[0] + K_centered = K - ones_M @ K - K @ ones_M + ones_M @ K @ ones_M + assert_allclose(kernel_centerer.transform(K), K_centered) + + # K_test_centered = (K_test - 1'_M K)(I - 1_M) + # = K_test - 1'_M K - K_test 1_M + 1'_M K 1_M + ones_prime_M = np.ones_like(K_test) / K.shape[0] + K_test_centered = ( + K_test - ones_prime_M @ K - K_test @ ones_M + ones_prime_M @ K @ ones_M + ) + assert_allclose(kernel_centerer.transform(K_test), K_test_centered) + + +def test_cv_pipeline_precomputed(): + # Cross-validate a regression on four coplanar points with the same + # value. Use precomputed kernel to ensure Pipeline with KernelCenterer + # is treated as a pairwise operation. + X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]]) + y_true = np.ones((4,)) + K = X.dot(X.T) + kcent = KernelCenterer() + pipeline = Pipeline([("kernel_centerer", kcent), ("svr", SVR())]) + + # did the pipeline set the pairwise attribute? 
+ assert pipeline._get_tags()["pairwise"] + + # test cross-validation, score should be almost perfect + # NB: this test is pretty vacuous -- it's mainly to test integration + # of Pipeline and KernelCenterer + y_pred = cross_val_predict(pipeline, K, y_true, cv=2) + assert_array_almost_equal(y_true, y_pred) + + +def test_fit_transform(): + rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + for obj in (StandardScaler(), Normalizer(), Binarizer()): + X_transformed = obj.fit(X).transform(X) + X_transformed2 = obj.fit_transform(X) + assert_array_equal(X_transformed, X_transformed2) + + +def test_add_dummy_feature(): + X = [[1, 0], [0, 1], [0, 1]] + X = add_dummy_feature(X) + assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) + + +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_add_dummy_feature_sparse(sparse_container): + X = sparse_container([[1, 0], [0, 1], [0, 1]]) + desired_format = X.format + X = add_dummy_feature(X) + assert sparse.issparse(X) and X.format == desired_format, X + assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) + + +def test_fit_cold_start(): + X = iris.data + X_2d = X[:, :2] + + # Scalers that have a partial_fit method + scalers = [ + StandardScaler(with_mean=False, with_std=False), + MinMaxScaler(), + MaxAbsScaler(), + ] + + for scaler in scalers: + scaler.fit_transform(X) + # with a different shape, this may break the scaler unless the internal + # state is reset + scaler.fit_transform(X_2d) + + +@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"]) +def test_power_transformer_notfitted(method): + pt = PowerTransformer(method=method) + X = np.abs(X_1col) + with pytest.raises(NotFittedError): + pt.transform(X) + with pytest.raises(NotFittedError): + pt.inverse_transform(X) + + +@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"]) +@pytest.mark.parametrize("standardize", [True, False]) +@pytest.mark.parametrize("X", [X_1col, X_2d]) +def test_power_transformer_inverse(method, standardize, X): + # Make sure we get the original input when applying transform and then + # inverse transform + X = np.abs(X) if method == "box-cox" else X + pt = PowerTransformer(method=method, standardize=standardize) + X_trans = pt.fit_transform(X) + assert_almost_equal(X, pt.inverse_transform(X_trans)) + + +def test_power_transformer_1d(): + X = np.abs(X_1col) + + for standardize in [True, False]: + pt = PowerTransformer(method="box-cox", standardize=standardize) + + X_trans = pt.fit_transform(X) + X_trans_func = power_transform(X, method="box-cox", standardize=standardize) + + X_expected, lambda_expected = stats.boxcox(X.flatten()) + + if standardize: + X_expected = scale(X_expected) + + assert_almost_equal(X_expected.reshape(-1, 1), X_trans) + assert_almost_equal(X_expected.reshape(-1, 1), X_trans_func) + + assert_almost_equal(X, pt.inverse_transform(X_trans)) + assert_almost_equal(lambda_expected, pt.lambdas_[0]) + + assert len(pt.lambdas_) == X.shape[1] + assert isinstance(pt.lambdas_, np.ndarray) + + +def test_power_transformer_2d(): + X = np.abs(X_2d) + + for standardize in [True, False]: + pt = PowerTransformer(method="box-cox", standardize=standardize) + + X_trans_class = pt.fit_transform(X) + X_trans_func = power_transform(X, method="box-cox", standardize=standardize) + + for X_trans in [X_trans_class, X_trans_func]: + for j in range(X_trans.shape[1]): + X_expected, lmbda = stats.boxcox(X[:, j].flatten()) + + if standardize: + X_expected = scale(X_expected) + + 
assert_almost_equal(X_trans[:, j], X_expected) + assert_almost_equal(lmbda, pt.lambdas_[j]) + + # Test inverse transformation + X_inv = pt.inverse_transform(X_trans) + assert_array_almost_equal(X_inv, X) + + assert len(pt.lambdas_) == X.shape[1] + assert isinstance(pt.lambdas_, np.ndarray) + + +def test_power_transformer_boxcox_strictly_positive_exception(): + # Exceptions should be raised for negative arrays and zero arrays when + # method is boxcox + + pt = PowerTransformer(method="box-cox") + pt.fit(np.abs(X_2d)) + X_with_negatives = X_2d + not_positive_message = "strictly positive" + + with pytest.raises(ValueError, match=not_positive_message): + pt.transform(X_with_negatives) + + with pytest.raises(ValueError, match=not_positive_message): + pt.fit(X_with_negatives) + + with pytest.raises(ValueError, match=not_positive_message): + power_transform(X_with_negatives, method="box-cox") + + with pytest.raises(ValueError, match=not_positive_message): + pt.transform(np.zeros(X_2d.shape)) + + with pytest.raises(ValueError, match=not_positive_message): + pt.fit(np.zeros(X_2d.shape)) + + with pytest.raises(ValueError, match=not_positive_message): + power_transform(np.zeros(X_2d.shape), method="box-cox") + + +@pytest.mark.parametrize("X", [X_2d, np.abs(X_2d), -np.abs(X_2d), np.zeros(X_2d.shape)]) +def test_power_transformer_yeojohnson_any_input(X): + # Yeo-Johnson method should support any kind of input + power_transform(X, method="yeo-johnson") + + +@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"]) +def test_power_transformer_shape_exception(method): + pt = PowerTransformer(method=method) + X = np.abs(X_2d) + pt.fit(X) + + # Exceptions should be raised for arrays with different num_columns + # than during fitting + wrong_shape_message = ( + r"X has \d+ features, but PowerTransformer is " r"expecting \d+ features" + ) + + with pytest.raises(ValueError, match=wrong_shape_message): + pt.transform(X[:, 0:1]) + + with pytest.raises(ValueError, match=wrong_shape_message): + pt.inverse_transform(X[:, 0:1]) + + +def test_power_transformer_lambda_zero(): + pt = PowerTransformer(method="box-cox", standardize=False) + X = np.abs(X_2d)[:, 0:1] + + # Test the lambda = 0 case + pt.lambdas_ = np.array([0]) + X_trans = pt.transform(X) + assert_array_almost_equal(pt.inverse_transform(X_trans), X) + + +def test_power_transformer_lambda_one(): + # Make sure lambda = 1 corresponds to the identity for yeo-johnson + pt = PowerTransformer(method="yeo-johnson", standardize=False) + X = np.abs(X_2d)[:, 0:1] + + pt.lambdas_ = np.array([1]) + X_trans = pt.transform(X) + assert_array_almost_equal(X_trans, X) + + +@pytest.mark.parametrize( + "method, lmbda", + [ + ("box-cox", 0.1), + ("box-cox", 0.5), + ("yeo-johnson", 0.1), + ("yeo-johnson", 0.5), + ("yeo-johnson", 1.0), + ], +) +def test_optimization_power_transformer(method, lmbda): + # Test the optimization procedure: + # - set a predefined value for lambda + # - apply inverse_transform to a normal dist (we get X_inv) + # - apply fit_transform to X_inv (we get X_inv_trans) + # - check that X_inv_trans is roughly equal to X + + rng = np.random.RandomState(0) + n_samples = 20000 + X = rng.normal(loc=0, scale=1, size=(n_samples, 1)) + + pt = PowerTransformer(method=method, standardize=False) + pt.lambdas_ = [lmbda] + X_inv = pt.inverse_transform(X) + + pt = PowerTransformer(method=method, standardize=False) + X_inv_trans = pt.fit_transform(X_inv) + + assert_almost_equal(0, np.linalg.norm(X - X_inv_trans) / n_samples, decimal=2) + assert_almost_equal(0, 
X_inv_trans.mean(), decimal=1) + assert_almost_equal(1, X_inv_trans.std(), decimal=1) + + +def test_yeo_johnson_darwin_example(): + # test from original paper "A new family of power transformations to + # improve normality or symmetry" by Yeo and Johnson. + X = [6.1, -8.4, 1.0, 2.0, 0.7, 2.9, 3.5, 5.1, 1.8, 3.6, 7.0, 3.0, 9.3, 7.5, -6.0] + X = np.array(X).reshape(-1, 1) + lmbda = PowerTransformer(method="yeo-johnson").fit(X).lambdas_ + assert np.allclose(lmbda, 1.305, atol=1e-3) + + +@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"]) +def test_power_transformer_nans(method): + # Make sure lambda estimation is not influenced by NaN values + # and that transform() supports NaN silently + + X = np.abs(X_1col) + pt = PowerTransformer(method=method) + pt.fit(X) + lmbda_no_nans = pt.lambdas_[0] + + # concat nans at the end and check lambda stays the same + X = np.concatenate([X, np.full_like(X, np.nan)]) + X = shuffle(X, random_state=0) + + pt.fit(X) + lmbda_nans = pt.lambdas_[0] + + assert_almost_equal(lmbda_no_nans, lmbda_nans, decimal=5) + + X_trans = pt.transform(X) + assert_array_equal(np.isnan(X_trans), np.isnan(X)) + + +@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"]) +@pytest.mark.parametrize("standardize", [True, False]) +def test_power_transformer_fit_transform(method, standardize): + # check that fit_transform() and fit().transform() return the same values + X = X_1col + if method == "box-cox": + X = np.abs(X) + + pt = PowerTransformer(method, standardize=standardize) + assert_array_almost_equal(pt.fit(X).transform(X), pt.fit_transform(X)) + + +@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"]) +@pytest.mark.parametrize("standardize", [True, False]) +def test_power_transformer_copy_True(method, standardize): + # Check that neither fit, transform, fit_transform nor inverse_transform + # modify X inplace when copy=True + X = X_1col + if method == "box-cox": + X = np.abs(X) + + X_original = X.copy() + assert X is not X_original # sanity checks + assert_array_almost_equal(X, X_original) + + pt = PowerTransformer(method, standardize=standardize, copy=True) + + pt.fit(X) + assert_array_almost_equal(X, X_original) + X_trans = pt.transform(X) + assert X_trans is not X + + X_trans = pt.fit_transform(X) + assert_array_almost_equal(X, X_original) + assert X_trans is not X + + X_inv_trans = pt.inverse_transform(X_trans) + assert X_trans is not X_inv_trans + + +@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"]) +@pytest.mark.parametrize("standardize", [True, False]) +def test_power_transformer_copy_False(method, standardize): + # check that when copy=False fit doesn't change X inplace but transform, + # fit_transform and inverse_transform do. + X = X_1col + if method == "box-cox": + X = np.abs(X) + + X_original = X.copy() + assert X is not X_original # sanity checks + assert_array_almost_equal(X, X_original) + + pt = PowerTransformer(method, standardize=standardize, copy=False) + + pt.fit(X) + assert_array_almost_equal(X, X_original) # fit didn't change X + + X_trans = pt.transform(X) + assert X_trans is X + + if method == "box-cox": + X = np.abs(X) + X_trans = pt.fit_transform(X) + assert X_trans is X + + X_inv_trans = pt.inverse_transform(X_trans) + assert X_trans is X_inv_trans + + +def test_power_transformer_box_cox_raise_all_nans_col(): + """Check that box-cox raises informative when a column contains all nans. 
+ + Non-regression test for gh-26303 + """ + X = rng.random_sample((4, 5)) + X[:, 0] = np.nan + + err_msg = "Column must not be all nan." + + pt = PowerTransformer(method="box-cox") + with pytest.raises(ValueError, match=err_msg): + pt.fit_transform(X) + + +@pytest.mark.parametrize( + "X_2", + [sparse.random(10, 1, density=0.8, random_state=0)] + + [ + csr_container(np.full((10, 1), fill_value=np.nan)) + for csr_container in CSR_CONTAINERS + ], +) +def test_standard_scaler_sparse_partial_fit_finite_variance(X_2): + # non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/16448 + X_1 = sparse.random(5, 1, density=0.8) + scaler = StandardScaler(with_mean=False) + scaler.fit(X_1).partial_fit(X_2) + assert np.isfinite(scaler.var_[0]) + + +@pytest.mark.parametrize("feature_range", [(0, 1), (-10, 10)]) +def test_minmax_scaler_clip(feature_range): + # test behaviour of the parameter 'clip' in MinMaxScaler + X = iris.data + scaler = MinMaxScaler(feature_range=feature_range, clip=True).fit(X) + X_min, X_max = np.min(X, axis=0), np.max(X, axis=0) + X_test = [np.r_[X_min[:2] - 10, X_max[2:] + 10]] + X_transformed = scaler.transform(X_test) + assert_allclose( + X_transformed, + [[feature_range[0], feature_range[0], feature_range[1], feature_range[1]]], + ) + + +def test_standard_scaler_raise_error_for_1d_input(): + """Check that `inverse_transform` from `StandardScaler` raises an error + with 1D array. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19518 + """ + scaler = StandardScaler().fit(X_2d) + err_msg = "Expected 2D array, got 1D array instead" + with pytest.raises(ValueError, match=err_msg): + scaler.inverse_transform(X_2d[:, 0]) + + +def test_power_transformer_significantly_non_gaussian(): + """Check that significantly non-Gaussian data before transforms correctly. + + For some explored lambdas, the transformed data may be constant and will + be rejected. 
Non-regression test for + https://github.com/scikit-learn/scikit-learn/issues/14959 + """ + + X_non_gaussian = 1e6 * np.array( + [0.6, 2.0, 3.0, 4.0] * 4 + [11, 12, 12, 16, 17, 20, 85, 90], dtype=np.float64 + ).reshape(-1, 1) + pt = PowerTransformer() + + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + X_trans = pt.fit_transform(X_non_gaussian) + + assert not np.any(np.isnan(X_trans)) + assert X_trans.mean() == pytest.approx(0.0) + assert X_trans.std() == pytest.approx(1.0) + assert X_trans.min() > -2 + assert X_trans.max() < 2 + + +@pytest.mark.parametrize( + "Transformer", + [ + MinMaxScaler, + MaxAbsScaler, + RobustScaler, + StandardScaler, + QuantileTransformer, + PowerTransformer, + ], +) +def test_one_to_one_features(Transformer): + """Check one-to-one transformers give correct feature names.""" + tr = Transformer().fit(iris.data) + names_out = tr.get_feature_names_out(iris.feature_names) + assert_array_equal(names_out, iris.feature_names) + + +@pytest.mark.parametrize( + "Transformer", + [ + MinMaxScaler, + MaxAbsScaler, + RobustScaler, + StandardScaler, + QuantileTransformer, + PowerTransformer, + Normalizer, + Binarizer, + ], +) +def test_one_to_one_features_pandas(Transformer): + """Check one-to-one transformers give correct feature names.""" + pd = pytest.importorskip("pandas") + + df = pd.DataFrame(iris.data, columns=iris.feature_names) + tr = Transformer().fit(df) + + names_out_df_default = tr.get_feature_names_out() + assert_array_equal(names_out_df_default, iris.feature_names) + + names_out_df_valid_in = tr.get_feature_names_out(iris.feature_names) + assert_array_equal(names_out_df_valid_in, iris.feature_names) + + msg = re.escape("input_features is not equal to feature_names_in_") + with pytest.raises(ValueError, match=msg): + invalid_names = list("abcd") + tr.get_feature_names_out(invalid_names) + + +def test_kernel_centerer_feature_names_out(): + """Test that kernel centerer `feature_names_out`.""" + + rng = np.random.RandomState(0) + X = rng.random_sample((6, 4)) + X_pairwise = linear_kernel(X) + centerer = KernelCenterer().fit(X_pairwise) + + names_out = centerer.get_feature_names_out() + samples_out2 = X_pairwise.shape[1] + assert_array_equal(names_out, [f"kernelcenterer{i}" for i in range(samples_out2)]) + + +@pytest.mark.parametrize("standardize", [True, False]) +def test_power_transformer_constant_feature(standardize): + """Check that PowerTransfomer leaves constant features unchanged.""" + X = [[-2, 0, 2], [-2, 0, 2], [-2, 0, 2]] + + pt = PowerTransformer(method="yeo-johnson", standardize=standardize).fit(X) + + assert_allclose(pt.lambdas_, [1, 1, 1]) + + Xft = pt.fit_transform(X) + Xt = pt.transform(X) + + for Xt_ in [Xft, Xt]: + if standardize: + assert_allclose(Xt_, np.zeros_like(X)) + else: + assert_allclose(Xt_, X) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_discretization.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_discretization.py new file mode 100644 index 0000000000000000000000000000000000000000..46ec86f7a75d43378614639f04e3c72e7e69aede --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_discretization.py @@ -0,0 +1,503 @@ +import warnings + +import numpy as np +import pytest +import scipy.sparse as sp + +from sklearn import clone +from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder +from sklearn.utils._testing import ( + assert_allclose, + assert_allclose_dense_sparse, + 
assert_array_almost_equal, + assert_array_equal, +) + +X = [[-2, 1.5, -4, -1], [-1, 2.5, -3, -0.5], [0, 3.5, -2, 0.5], [1, 4.5, -1, 2]] + + +@pytest.mark.parametrize( + "strategy, expected, sample_weight", + [ + ("uniform", [[0, 0, 0, 0], [1, 1, 1, 0], [2, 2, 2, 1], [2, 2, 2, 2]], None), + ("kmeans", [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]], None), + ("quantile", [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [2, 2, 2, 2]], None), + ( + "quantile", + [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [2, 2, 2, 2]], + [1, 1, 2, 1], + ), + ( + "quantile", + [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [2, 2, 2, 2]], + [1, 1, 1, 1], + ), + ( + "quantile", + [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1]], + [0, 1, 1, 1], + ), + ( + "kmeans", + [[0, 0, 0, 0], [1, 1, 1, 0], [1, 1, 1, 1], [2, 2, 2, 2]], + [1, 0, 3, 1], + ), + ( + "kmeans", + [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]], + [1, 1, 1, 1], + ), + ], +) +# TODO(1.5) remove warning filter when kbd's subsample default is changed +@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000") +def test_fit_transform(strategy, expected, sample_weight): + est = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy=strategy) + est.fit(X, sample_weight=sample_weight) + assert_array_equal(expected, est.transform(X)) + + +def test_valid_n_bins(): + KBinsDiscretizer(n_bins=2).fit_transform(X) + KBinsDiscretizer(n_bins=np.array([2])[0]).fit_transform(X) + assert KBinsDiscretizer(n_bins=2).fit(X).n_bins_.dtype == np.dtype(int) + + +@pytest.mark.parametrize("strategy", ["uniform"]) +def test_kbinsdiscretizer_wrong_strategy_with_weights(strategy): + """Check that we raise an error when the wrong strategy is used.""" + sample_weight = np.ones(shape=(len(X))) + est = KBinsDiscretizer(n_bins=3, strategy=strategy) + err_msg = ( + "`sample_weight` was provided but it cannot be used with strategy='uniform'." + ) + with pytest.raises(ValueError, match=err_msg): + est.fit(X, sample_weight=sample_weight) + + +def test_invalid_n_bins_array(): + # Bad shape + n_bins = np.full((2, 4), 2.0) + est = KBinsDiscretizer(n_bins=n_bins) + err_msg = r"n_bins must be a scalar or array of shape \(n_features,\)." + with pytest.raises(ValueError, match=err_msg): + est.fit_transform(X) + + # Incorrect number of features + n_bins = [1, 2, 2] + est = KBinsDiscretizer(n_bins=n_bins) + err_msg = r"n_bins must be a scalar or array of shape \(n_features,\)." + with pytest.raises(ValueError, match=err_msg): + est.fit_transform(X) + + # Bad bin values + n_bins = [1, 2, 2, 1] + est = KBinsDiscretizer(n_bins=n_bins) + err_msg = ( + "KBinsDiscretizer received an invalid number of bins " + "at indices 0, 3. Number of bins must be at least 2, " + "and must be an int." + ) + with pytest.raises(ValueError, match=err_msg): + est.fit_transform(X) + + # Float bin values + n_bins = [2.1, 2, 2.1, 2] + est = KBinsDiscretizer(n_bins=n_bins) + err_msg = ( + "KBinsDiscretizer received an invalid number of bins " + "at indices 0, 2. Number of bins must be at least 2, " + "and must be an int." 
+ ) + with pytest.raises(ValueError, match=err_msg): + est.fit_transform(X) + + +@pytest.mark.parametrize( + "strategy, expected, sample_weight", + [ + ("uniform", [[0, 0, 0, 0], [0, 1, 1, 0], [1, 2, 2, 1], [1, 2, 2, 2]], None), + ("kmeans", [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 2, 2, 2]], None), + ("quantile", [[0, 0, 0, 0], [0, 1, 1, 1], [1, 2, 2, 2], [1, 2, 2, 2]], None), + ( + "quantile", + [[0, 0, 0, 0], [0, 1, 1, 1], [1, 2, 2, 2], [1, 2, 2, 2]], + [1, 1, 3, 1], + ), + ( + "quantile", + [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1]], + [0, 1, 3, 1], + ), + # ( + # "quantile", + # [[0, 0, 0, 0], [0, 1, 1, 1], [1, 2, 2, 2], [1, 2, 2, 2]], + # [1, 1, 1, 1], + # ), + # + # TODO: This test case above aims to test if the case where an array of + # ones passed in sample_weight parameter is equal to the case when + # sample_weight is None. + # Unfortunately, the behavior of `_weighted_percentile` when + # `sample_weight = [1, 1, 1, 1]` are currently not equivalent. + # This problem has been addressed in issue : + # https://github.com/scikit-learn/scikit-learn/issues/17370 + ( + "kmeans", + [[0, 0, 0, 0], [0, 1, 1, 0], [1, 1, 1, 1], [1, 2, 2, 2]], + [1, 0, 3, 1], + ), + ], +) +# TODO(1.5) remove warning filter when kbd's subsample default is changed +@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000") +def test_fit_transform_n_bins_array(strategy, expected, sample_weight): + est = KBinsDiscretizer( + n_bins=[2, 3, 3, 3], encode="ordinal", strategy=strategy + ).fit(X, sample_weight=sample_weight) + assert_array_equal(expected, est.transform(X)) + + # test the shape of bin_edges_ + n_features = np.array(X).shape[1] + assert est.bin_edges_.shape == (n_features,) + for bin_edges, n_bins in zip(est.bin_edges_, est.n_bins_): + assert bin_edges.shape == (n_bins + 1,) + + +@pytest.mark.filterwarnings("ignore: Bins whose width are too small") +def test_kbinsdiscretizer_effect_sample_weight(): + """Check the impact of `sample_weight` one computed quantiles.""" + X = np.array([[-2], [-1], [1], [3], [500], [1000]]) + # add a large number of bins such that each sample with a non-null weight + # will be used as bin edge + est = KBinsDiscretizer(n_bins=10, encode="ordinal", strategy="quantile") + est.fit(X, sample_weight=[1, 1, 1, 1, 0, 0]) + assert_allclose(est.bin_edges_[0], [-2, -1, 1, 3]) + assert_allclose(est.transform(X), [[0.0], [1.0], [2.0], [2.0], [2.0], [2.0]]) + + +# TODO(1.5) remove warning filter when kbd's subsample default is changed +@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000") +@pytest.mark.parametrize("strategy", ["kmeans", "quantile"]) +def test_kbinsdiscretizer_no_mutating_sample_weight(strategy): + """Make sure that `sample_weight` is not changed in place.""" + est = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy=strategy) + sample_weight = np.array([1, 3, 1, 2], dtype=np.float64) + sample_weight_copy = np.copy(sample_weight) + est.fit(X, sample_weight=sample_weight) + assert_allclose(sample_weight, sample_weight_copy) + + +@pytest.mark.parametrize("strategy", ["uniform", "kmeans", "quantile"]) +def test_same_min_max(strategy): + warnings.simplefilter("always") + X = np.array([[1, -2], [1, -1], [1, 0], [1, 1]]) + est = KBinsDiscretizer(strategy=strategy, n_bins=3, encode="ordinal") + warning_message = "Feature 0 is constant and will be replaced with 0." 
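The sample-weight tests above hinge on zero-weighted samples being excluded from the weighted quantiles used for the bin edges. A minimal sketch of that behaviour, with illustrative values (not taken from the diff) and assuming a KBinsDiscretizer version whose fit accepts sample_weight, as these tests do:

    import numpy as np
    from sklearn.preprocessing import KBinsDiscretizer

    # Illustrative data: the last two samples carry zero weight.
    X = np.array([[-2.0], [-1.0], [1.0], [3.0], [500.0], [1000.0]])
    sample_weight = [1, 1, 1, 1, 0, 0]

    kbd = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy="quantile",
                           subsample=None)
    kbd.fit(X, sample_weight=sample_weight)

    # Zero-weighted samples do not influence the weighted quantiles, so the
    # learned edges reflect only the first four values.
    print(kbd.bin_edges_[0])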
+ with pytest.warns(UserWarning, match=warning_message): + est.fit(X) + assert est.n_bins_[0] == 1 + # replace the feature with zeros + Xt = est.transform(X) + assert_array_equal(Xt[:, 0], np.zeros(X.shape[0])) + + +def test_transform_1d_behavior(): + X = np.arange(4) + est = KBinsDiscretizer(n_bins=2) + with pytest.raises(ValueError): + est.fit(X) + + est = KBinsDiscretizer(n_bins=2) + est.fit(X.reshape(-1, 1)) + with pytest.raises(ValueError): + est.transform(X) + + +@pytest.mark.parametrize("i", range(1, 9)) +def test_numeric_stability(i): + X_init = np.array([2.0, 4.0, 6.0, 8.0, 10.0]).reshape(-1, 1) + Xt_expected = np.array([0, 0, 1, 1, 1]).reshape(-1, 1) + + # Test up to discretizing nano units + X = X_init / 10**i + Xt = KBinsDiscretizer(n_bins=2, encode="ordinal").fit_transform(X) + assert_array_equal(Xt_expected, Xt) + + +def test_encode_options(): + est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode="ordinal").fit(X) + Xt_1 = est.transform(X) + est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode="onehot-dense").fit(X) + Xt_2 = est.transform(X) + assert not sp.issparse(Xt_2) + assert_array_equal( + OneHotEncoder( + categories=[np.arange(i) for i in [2, 3, 3, 3]], sparse_output=False + ).fit_transform(Xt_1), + Xt_2, + ) + est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode="onehot").fit(X) + Xt_3 = est.transform(X) + assert sp.issparse(Xt_3) + assert_array_equal( + OneHotEncoder( + categories=[np.arange(i) for i in [2, 3, 3, 3]], sparse_output=True + ) + .fit_transform(Xt_1) + .toarray(), + Xt_3.toarray(), + ) + + +@pytest.mark.parametrize( + "strategy, expected_2bins, expected_3bins, expected_5bins", + [ + ("uniform", [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 2, 2], [0, 0, 1, 1, 4, 4]), + ("kmeans", [0, 0, 0, 0, 1, 1], [0, 0, 1, 1, 2, 2], [0, 0, 1, 2, 3, 4]), + ("quantile", [0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 2, 2], [0, 1, 2, 3, 4, 4]), + ], +) +# TODO(1.5) remove warning filter when kbd's subsample default is changed +@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000") +def test_nonuniform_strategies( + strategy, expected_2bins, expected_3bins, expected_5bins +): + X = np.array([0, 0.5, 2, 3, 9, 10]).reshape(-1, 1) + + # with 2 bins + est = KBinsDiscretizer(n_bins=2, strategy=strategy, encode="ordinal") + Xt = est.fit_transform(X) + assert_array_equal(expected_2bins, Xt.ravel()) + + # with 3 bins + est = KBinsDiscretizer(n_bins=3, strategy=strategy, encode="ordinal") + Xt = est.fit_transform(X) + assert_array_equal(expected_3bins, Xt.ravel()) + + # with 5 bins + est = KBinsDiscretizer(n_bins=5, strategy=strategy, encode="ordinal") + Xt = est.fit_transform(X) + assert_array_equal(expected_5bins, Xt.ravel()) + + +@pytest.mark.parametrize( + "strategy, expected_inv", + [ + ( + "uniform", + [ + [-1.5, 2.0, -3.5, -0.5], + [-0.5, 3.0, -2.5, -0.5], + [0.5, 4.0, -1.5, 0.5], + [0.5, 4.0, -1.5, 1.5], + ], + ), + ( + "kmeans", + [ + [-1.375, 2.125, -3.375, -0.5625], + [-1.375, 2.125, -3.375, -0.5625], + [-0.125, 3.375, -2.125, 0.5625], + [0.75, 4.25, -1.25, 1.625], + ], + ), + ( + "quantile", + [ + [-1.5, 2.0, -3.5, -0.75], + [-0.5, 3.0, -2.5, 0.0], + [0.5, 4.0, -1.5, 1.25], + [0.5, 4.0, -1.5, 1.25], + ], + ), + ], +) +# TODO(1.5) remove warning filter when kbd's subsample default is changed +@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000") +@pytest.mark.parametrize("encode", ["ordinal", "onehot", "onehot-dense"]) +def test_inverse_transform(strategy, encode, expected_inv): + kbd = KBinsDiscretizer(n_bins=3, strategy=strategy, encode=encode) + Xt 
= kbd.fit_transform(X) + Xinv = kbd.inverse_transform(Xt) + assert_array_almost_equal(expected_inv, Xinv) + + +# TODO(1.5) remove warning filter when kbd's subsample default is changed +@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000") +@pytest.mark.parametrize("strategy", ["uniform", "kmeans", "quantile"]) +def test_transform_outside_fit_range(strategy): + X = np.array([0, 1, 2, 3])[:, None] + kbd = KBinsDiscretizer(n_bins=4, strategy=strategy, encode="ordinal") + kbd.fit(X) + + X2 = np.array([-2, 5])[:, None] + X2t = kbd.transform(X2) + assert_array_equal(X2t.max(axis=0) + 1, kbd.n_bins_) + assert_array_equal(X2t.min(axis=0), [0]) + + +def test_overwrite(): + X = np.array([0, 1, 2, 3])[:, None] + X_before = X.copy() + + est = KBinsDiscretizer(n_bins=3, encode="ordinal") + Xt = est.fit_transform(X) + assert_array_equal(X, X_before) + + Xt_before = Xt.copy() + Xinv = est.inverse_transform(Xt) + assert_array_equal(Xt, Xt_before) + assert_array_equal(Xinv, np.array([[0.5], [1.5], [2.5], [2.5]])) + + +@pytest.mark.parametrize( + "strategy, expected_bin_edges", [("quantile", [0, 1, 3]), ("kmeans", [0, 1.5, 3])] +) +def test_redundant_bins(strategy, expected_bin_edges): + X = [[0], [0], [0], [0], [3], [3]] + kbd = KBinsDiscretizer(n_bins=3, strategy=strategy, subsample=None) + warning_message = "Consider decreasing the number of bins." + with pytest.warns(UserWarning, match=warning_message): + kbd.fit(X) + assert_array_almost_equal(kbd.bin_edges_[0], expected_bin_edges) + + +def test_percentile_numeric_stability(): + X = np.array([0.05, 0.05, 0.95]).reshape(-1, 1) + bin_edges = np.array([0.05, 0.23, 0.41, 0.59, 0.77, 0.95]) + Xt = np.array([0, 0, 4]).reshape(-1, 1) + kbd = KBinsDiscretizer(n_bins=10, encode="ordinal", strategy="quantile") + warning_message = "Consider decreasing the number of bins." 
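As background for the inverse_transform assertions in the tests above, a small hedged sketch (the values are illustrative, not copied from the tests):

    import numpy as np
    from sklearn.preprocessing import KBinsDiscretizer

    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    kbd = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy="quantile")
    Xt = kbd.fit_transform(X)

    # inverse_transform maps each ordinal code back to the centre of its bin,
    # so the round trip is lossy but stays within the fitted range.
    print(kbd.inverse_transform(Xt))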
+ with pytest.warns(UserWarning, match=warning_message): + kbd.fit(X) + + assert_array_almost_equal(kbd.bin_edges_[0], bin_edges) + assert_array_almost_equal(kbd.transform(X), Xt) + + +@pytest.mark.parametrize("in_dtype", [np.float16, np.float32, np.float64]) +@pytest.mark.parametrize("out_dtype", [None, np.float32, np.float64]) +@pytest.mark.parametrize("encode", ["ordinal", "onehot", "onehot-dense"]) +def test_consistent_dtype(in_dtype, out_dtype, encode): + X_input = np.array(X, dtype=in_dtype) + kbd = KBinsDiscretizer(n_bins=3, encode=encode, dtype=out_dtype) + kbd.fit(X_input) + + # test output dtype + if out_dtype is not None: + expected_dtype = out_dtype + elif out_dtype is None and X_input.dtype == np.float16: + # wrong numeric input dtype are cast in np.float64 + expected_dtype = np.float64 + else: + expected_dtype = X_input.dtype + Xt = kbd.transform(X_input) + assert Xt.dtype == expected_dtype + + +@pytest.mark.parametrize("input_dtype", [np.float16, np.float32, np.float64]) +@pytest.mark.parametrize("encode", ["ordinal", "onehot", "onehot-dense"]) +def test_32_equal_64(input_dtype, encode): + # TODO this check is redundant with common checks and can be removed + # once #16290 is merged + X_input = np.array(X, dtype=input_dtype) + + # 32 bit output + kbd_32 = KBinsDiscretizer(n_bins=3, encode=encode, dtype=np.float32) + kbd_32.fit(X_input) + Xt_32 = kbd_32.transform(X_input) + + # 64 bit output + kbd_64 = KBinsDiscretizer(n_bins=3, encode=encode, dtype=np.float64) + kbd_64.fit(X_input) + Xt_64 = kbd_64.transform(X_input) + + assert_allclose_dense_sparse(Xt_32, Xt_64) + + +def test_kbinsdiscretizer_subsample_default(): + # Since the size of X is small (< 2e5), subsampling will not take place. + X = np.array([-2, 1.5, -4, -1]).reshape(-1, 1) + kbd_default = KBinsDiscretizer(n_bins=10, encode="ordinal", strategy="quantile") + kbd_default.fit(X) + + kbd_without_subsampling = clone(kbd_default) + kbd_without_subsampling.set_params(subsample=None) + kbd_without_subsampling.fit(X) + + for bin_kbd_default, bin_kbd_with_subsampling in zip( + kbd_default.bin_edges_[0], kbd_without_subsampling.bin_edges_[0] + ): + np.testing.assert_allclose(bin_kbd_default, bin_kbd_with_subsampling) + assert kbd_default.bin_edges_.shape == kbd_without_subsampling.bin_edges_.shape + + +@pytest.mark.parametrize( + "encode, expected_names", + [ + ( + "onehot", + [ + f"feat{col_id}_{float(bin_id)}" + for col_id in range(3) + for bin_id in range(4) + ], + ), + ( + "onehot-dense", + [ + f"feat{col_id}_{float(bin_id)}" + for col_id in range(3) + for bin_id in range(4) + ], + ), + ("ordinal", [f"feat{col_id}" for col_id in range(3)]), + ], +) +def test_kbinsdiscrtizer_get_feature_names_out(encode, expected_names): + """Check get_feature_names_out for different settings. + Non-regression test for #22731 + """ + X = [[-2, 1, -4], [-1, 2, -3], [0, 3, -2], [1, 4, -1]] + + kbd = KBinsDiscretizer(n_bins=4, encode=encode).fit(X) + Xt = kbd.transform(X) + + input_features = [f"feat{i}" for i in range(3)] + output_names = kbd.get_feature_names_out(input_features) + assert Xt.shape[1] == output_names.shape[0] + + assert_array_equal(output_names, expected_names) + + +@pytest.mark.parametrize("strategy", ["uniform", "kmeans", "quantile"]) +def test_kbinsdiscretizer_subsample(strategy, global_random_seed): + # Check that the bin edges are almost the same when subsampling is used. 
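The subsampling comparison below expects the edges fitted on a subsample to stay close to the edges fitted on the full data. A rough, self-contained version of the same check (sizes and tolerances chosen here for illustration only):

    import numpy as np
    from sklearn.preprocessing import KBinsDiscretizer

    rng = np.random.RandomState(0)
    X = rng.random_sample((100_000, 1))

    kbd_sub = KBinsDiscretizer(n_bins=5, encode="ordinal", strategy="quantile",
                               subsample=50_000, random_state=0).fit(X)
    kbd_full = KBinsDiscretizer(n_bins=5, encode="ordinal", strategy="quantile",
                                subsample=None).fit(X)

    # Subsampling trades a little bin-edge accuracy for fitting speed; on data
    # of this size the two sets of edges typically agree to within about 1%.
    print(kbd_sub.bin_edges_[0])
    print(kbd_full.bin_edges_[0])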
+ X = np.random.RandomState(global_random_seed).random_sample((100000, 1)) + 1 + + kbd_subsampling = KBinsDiscretizer( + strategy=strategy, subsample=50000, random_state=global_random_seed + ) + kbd_subsampling.fit(X) + + kbd_no_subsampling = clone(kbd_subsampling) + kbd_no_subsampling.set_params(subsample=None) + kbd_no_subsampling.fit(X) + + # We use a large tolerance because we can't expect the bin edges to be exactly the + # same when subsampling is used. + assert_allclose( + kbd_subsampling.bin_edges_[0], kbd_no_subsampling.bin_edges_[0], rtol=1e-2 + ) + + +# TODO(1.5) remove this test +@pytest.mark.parametrize("strategy", ["uniform", "kmeans"]) +def test_kbd_subsample_warning(strategy): + # Check the future warning for the change of default of subsample + X = np.random.RandomState(0).random_sample((100, 1)) + + kbd = KBinsDiscretizer(strategy=strategy, random_state=0) + with pytest.warns(FutureWarning, match="subsample=200_000 will be used by default"): + kbd.fit(X) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_encoders.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_encoders.py new file mode 100644 index 0000000000000000000000000000000000000000..ee5e1152fc710e5791e446ca8ffe0bc87beb001b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_encoders.py @@ -0,0 +1,2338 @@ +import re + +import numpy as np +import pytest +from scipy import sparse + +from sklearn.exceptions import NotFittedError +from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder +from sklearn.utils import is_scalar_nan +from sklearn.utils._testing import ( + _convert_container, + assert_allclose, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +def test_one_hot_encoder_sparse_dense(): + # check that sparse and dense will give the same results + + X = np.array([[3, 2, 1], [0, 1, 1]]) + enc_sparse = OneHotEncoder() + enc_dense = OneHotEncoder(sparse_output=False) + + X_trans_sparse = enc_sparse.fit_transform(X) + X_trans_dense = enc_dense.fit_transform(X) + + assert X_trans_sparse.shape == (2, 5) + assert X_trans_dense.shape == (2, 5) + + assert sparse.issparse(X_trans_sparse) + assert not sparse.issparse(X_trans_dense) + + # check outcome + assert_array_equal( + X_trans_sparse.toarray(), [[0.0, 1.0, 0.0, 1.0, 1.0], [1.0, 0.0, 1.0, 0.0, 1.0]] + ) + assert_array_equal(X_trans_sparse.toarray(), X_trans_dense) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +def test_one_hot_encoder_handle_unknown(handle_unknown): + X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]]) + X2 = np.array([[4, 1, 1]]) + + # Test that one hot encoder raises error for unknown features + # present during transform. 
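The encoder tests that follow repeatedly exercise the handle_unknown contract: the default "error" refuses categories unseen during fit, while "ignore" (and "infrequent_if_exist" when no infrequent categories exist) encodes them as an all-zero row. An illustrative sketch with made-up category values:

    import numpy as np
    from sklearn.preprocessing import OneHotEncoder

    X = np.array([["cat"], ["dog"]], dtype=object)

    enc = OneHotEncoder(handle_unknown="ignore", sparse_output=False).fit(X)

    # "fish" was never seen during fit, so its row is all zeros rather than
    # raising, as it would with the default handle_unknown="error".
    print(enc.transform(np.array([["dog"], ["fish"]], dtype=object)))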
+ oh = OneHotEncoder(handle_unknown="error") + oh.fit(X) + with pytest.raises(ValueError, match="Found unknown categories"): + oh.transform(X2) + + # Test the ignore option, ignores unknown features (giving all 0's) + oh = OneHotEncoder(handle_unknown=handle_unknown) + oh.fit(X) + X2_passed = X2.copy() + assert_array_equal( + oh.transform(X2_passed).toarray(), + np.array([[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]]), + ) + # ensure transformed data was not modified in place + assert_allclose(X2, X2_passed) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +def test_one_hot_encoder_handle_unknown_strings(handle_unknown): + X = np.array(["11111111", "22", "333", "4444"]).reshape((-1, 1)) + X2 = np.array(["55555", "22"]).reshape((-1, 1)) + # Non Regression test for the issue #12470 + # Test the ignore option, when categories are numpy string dtype + # particularly when the known category strings are larger + # than the unknown category strings + oh = OneHotEncoder(handle_unknown=handle_unknown) + oh.fit(X) + X2_passed = X2.copy() + assert_array_equal( + oh.transform(X2_passed).toarray(), + np.array([[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]), + ) + # ensure transformed data was not modified in place + assert_array_equal(X2, X2_passed) + + +@pytest.mark.parametrize("output_dtype", [np.int32, np.float32, np.float64]) +@pytest.mark.parametrize("input_dtype", [np.int32, np.float32, np.float64]) +def test_one_hot_encoder_dtype(input_dtype, output_dtype): + X = np.asarray([[0, 1]], dtype=input_dtype).T + X_expected = np.asarray([[1, 0], [0, 1]], dtype=output_dtype) + + oh = OneHotEncoder(categories="auto", dtype=output_dtype) + assert_array_equal(oh.fit_transform(X).toarray(), X_expected) + assert_array_equal(oh.fit(X).transform(X).toarray(), X_expected) + + oh = OneHotEncoder(categories="auto", dtype=output_dtype, sparse_output=False) + assert_array_equal(oh.fit_transform(X), X_expected) + assert_array_equal(oh.fit(X).transform(X), X_expected) + + +@pytest.mark.parametrize("output_dtype", [np.int32, np.float32, np.float64]) +def test_one_hot_encoder_dtype_pandas(output_dtype): + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"A": ["a", "b"], "B": [1, 2]}) + X_expected = np.array([[1, 0, 1, 0], [0, 1, 0, 1]], dtype=output_dtype) + + oh = OneHotEncoder(dtype=output_dtype) + assert_array_equal(oh.fit_transform(X_df).toarray(), X_expected) + assert_array_equal(oh.fit(X_df).transform(X_df).toarray(), X_expected) + + oh = OneHotEncoder(dtype=output_dtype, sparse_output=False) + assert_array_equal(oh.fit_transform(X_df), X_expected) + assert_array_equal(oh.fit(X_df).transform(X_df), X_expected) + + +def test_one_hot_encoder_feature_names(): + enc = OneHotEncoder() + X = [ + ["Male", 1, "girl", 2, 3], + ["Female", 41, "girl", 1, 10], + ["Male", 51, "boy", 12, 3], + ["Male", 91, "girl", 21, 30], + ] + + enc.fit(X) + feature_names = enc.get_feature_names_out() + + assert_array_equal( + [ + "x0_Female", + "x0_Male", + "x1_1", + "x1_41", + "x1_51", + "x1_91", + "x2_boy", + "x2_girl", + "x3_1", + "x3_2", + "x3_12", + "x3_21", + "x4_3", + "x4_10", + "x4_30", + ], + feature_names, + ) + + feature_names2 = enc.get_feature_names_out(["one", "two", "three", "four", "five"]) + + assert_array_equal( + [ + "one_Female", + "one_Male", + "two_1", + "two_41", + "two_51", + "two_91", + "three_boy", + "three_girl", + "four_1", + "four_2", + "four_12", + "four_21", + "five_3", + "five_10", + "five_30", + ], + feature_names2, + ) + + with pytest.raises(ValueError, 
match="input_features should have length"): + enc.get_feature_names_out(["one", "two"]) + + +def test_one_hot_encoder_feature_names_unicode(): + enc = OneHotEncoder() + X = np.array([["c❤t1", "dat2"]], dtype=object).T + enc.fit(X) + feature_names = enc.get_feature_names_out() + assert_array_equal(["x0_c❤t1", "x0_dat2"], feature_names) + feature_names = enc.get_feature_names_out(input_features=["n👍me"]) + assert_array_equal(["n👍me_c❤t1", "n👍me_dat2"], feature_names) + + +def test_one_hot_encoder_custom_feature_name_combiner(): + """Check the behaviour of `feature_name_combiner` as a callable.""" + + def name_combiner(feature, category): + return feature + "_" + repr(category) + + enc = OneHotEncoder(feature_name_combiner=name_combiner) + X = np.array([["None", None]], dtype=object).T + enc.fit(X) + feature_names = enc.get_feature_names_out() + assert_array_equal(["x0_'None'", "x0_None"], feature_names) + feature_names = enc.get_feature_names_out(input_features=["a"]) + assert_array_equal(["a_'None'", "a_None"], feature_names) + + def wrong_combiner(feature, category): + # we should be returning a Python string + return 0 + + enc = OneHotEncoder(feature_name_combiner=wrong_combiner).fit(X) + err_msg = ( + "When `feature_name_combiner` is a callable, it should return a Python string." + ) + with pytest.raises(TypeError, match=err_msg): + enc.get_feature_names_out() + + +def test_one_hot_encoder_set_params(): + X = np.array([[1, 2]]).T + oh = OneHotEncoder() + # set params on not yet fitted object + oh.set_params(categories=[[0, 1, 2, 3]]) + assert oh.get_params()["categories"] == [[0, 1, 2, 3]] + assert oh.fit_transform(X).toarray().shape == (2, 4) + # set params on already fitted object + oh.set_params(categories=[[0, 1, 2, 3, 4]]) + assert oh.fit_transform(X).toarray().shape == (2, 5) + + +def check_categorical_onehot(X): + enc = OneHotEncoder(categories="auto") + Xtr1 = enc.fit_transform(X) + + enc = OneHotEncoder(categories="auto", sparse_output=False) + Xtr2 = enc.fit_transform(X) + + assert_allclose(Xtr1.toarray(), Xtr2) + + assert sparse.issparse(Xtr1) and Xtr1.format == "csr" + return Xtr1.toarray() + + +@pytest.mark.parametrize( + "X", + [ + [["def", 1, 55], ["abc", 2, 55]], + np.array([[10, 1, 55], [5, 2, 55]]), + np.array([["b", "A", "cat"], ["a", "B", "cat"]], dtype=object), + np.array([["b", 1, "cat"], ["a", np.nan, "cat"]], dtype=object), + np.array([["b", 1, "cat"], ["a", float("nan"), "cat"]], dtype=object), + np.array([[None, 1, "cat"], ["a", 2, "cat"]], dtype=object), + np.array([[None, 1, None], ["a", np.nan, None]], dtype=object), + np.array([[None, 1, None], ["a", float("nan"), None]], dtype=object), + ], + ids=[ + "mixed", + "numeric", + "object", + "mixed-nan", + "mixed-float-nan", + "mixed-None", + "mixed-None-nan", + "mixed-None-float-nan", + ], +) +def test_one_hot_encoder(X): + Xtr = check_categorical_onehot(np.array(X)[:, [0]]) + assert_allclose(Xtr, [[0, 1], [1, 0]]) + + Xtr = check_categorical_onehot(np.array(X)[:, [0, 1]]) + assert_allclose(Xtr, [[0, 1, 1, 0], [1, 0, 0, 1]]) + + Xtr = OneHotEncoder(categories="auto").fit_transform(X) + assert_allclose(Xtr.toarray(), [[0, 1, 1, 0, 1], [1, 0, 0, 1, 1]]) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +@pytest.mark.parametrize("sparse_", [False, True]) +@pytest.mark.parametrize("drop", [None, "first"]) +def test_one_hot_encoder_inverse(handle_unknown, sparse_, drop): + X = [["abc", 2, 55], ["def", 1, 55], ["abc", 3, 55]] + enc = OneHotEncoder(sparse_output=sparse_, drop=drop) + 
X_tr = enc.fit_transform(X) + exp = np.array(X, dtype=object) + assert_array_equal(enc.inverse_transform(X_tr), exp) + + X = [[2, 55], [1, 55], [3, 55]] + enc = OneHotEncoder(sparse_output=sparse_, categories="auto", drop=drop) + X_tr = enc.fit_transform(X) + exp = np.array(X) + assert_array_equal(enc.inverse_transform(X_tr), exp) + + if drop is None: + # with unknown categories + # drop is incompatible with handle_unknown=ignore + X = [["abc", 2, 55], ["def", 1, 55], ["abc", 3, 55]] + enc = OneHotEncoder( + sparse_output=sparse_, + handle_unknown=handle_unknown, + categories=[["abc", "def"], [1, 2], [54, 55, 56]], + ) + X_tr = enc.fit_transform(X) + exp = np.array(X, dtype=object) + exp[2, 1] = None + assert_array_equal(enc.inverse_transform(X_tr), exp) + + # with an otherwise numerical output, still object if unknown + X = [[2, 55], [1, 55], [3, 55]] + enc = OneHotEncoder( + sparse_output=sparse_, + categories=[[1, 2], [54, 56]], + handle_unknown=handle_unknown, + ) + X_tr = enc.fit_transform(X) + exp = np.array(X, dtype=object) + exp[2, 0] = None + exp[:, 1] = None + assert_array_equal(enc.inverse_transform(X_tr), exp) + + # incorrect shape raises + X_tr = np.array([[0, 1, 1], [1, 0, 1]]) + msg = re.escape("Shape of the passed X data is not correct") + with pytest.raises(ValueError, match=msg): + enc.inverse_transform(X_tr) + + +@pytest.mark.parametrize("sparse_", [False, True]) +@pytest.mark.parametrize( + "X, X_trans", + [ + ([[2, 55], [1, 55], [2, 55]], [[0, 1, 1], [0, 0, 0], [0, 1, 1]]), + ( + [["one", "a"], ["two", "a"], ["three", "b"], ["two", "a"]], + [[0, 0, 0, 0, 0], [0, 0, 0, 0, 1], [0, 1, 0, 0, 0]], + ), + ], +) +def test_one_hot_encoder_inverse_transform_raise_error_with_unknown( + X, X_trans, sparse_ +): + """Check that `inverse_transform` raise an error with unknown samples, no + dropped feature, and `handle_unknow="error`. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/14934 + """ + enc = OneHotEncoder(sparse_output=sparse_).fit(X) + msg = ( + r"Samples \[(\d )*\d\] can not be inverted when drop=None and " + r"handle_unknown='error' because they contain all zeros" + ) + + if sparse_: + # emulate sparse data transform by a one-hot encoder sparse. 
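The all-zero-row error tested above only applies when drop=None and handle_unknown="error"; with handle_unknown="ignore" an all-zero row is instead inverted to None. A short hedged example of that complementary case (categories invented for illustration):

    import numpy as np
    from sklearn.preprocessing import OneHotEncoder

    enc = OneHotEncoder(handle_unknown="ignore", sparse_output=False)
    enc.fit(np.array([["a"], ["b"]], dtype=object))

    codes = enc.transform(np.array([["a"], ["z"]], dtype=object))
    # The unknown "z" becomes an all-zero row, and inverse_transform reports
    # it as None instead of guessing a category.
    print(enc.inverse_transform(codes))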
+ X_trans = _convert_container(X_trans, "sparse") + with pytest.raises(ValueError, match=msg): + enc.inverse_transform(X_trans) + + +def test_one_hot_encoder_inverse_if_binary(): + X = np.array([["Male", 1], ["Female", 3], ["Female", 2]], dtype=object) + ohe = OneHotEncoder(drop="if_binary", sparse_output=False) + X_tr = ohe.fit_transform(X) + assert_array_equal(ohe.inverse_transform(X_tr), X) + + +@pytest.mark.parametrize("drop", ["if_binary", "first", None]) +@pytest.mark.parametrize("reset_drop", ["if_binary", "first", None]) +def test_one_hot_encoder_drop_reset(drop, reset_drop): + # check that resetting drop option without refitting does not throw an error + X = np.array([["Male", 1], ["Female", 3], ["Female", 2]], dtype=object) + ohe = OneHotEncoder(drop=drop, sparse_output=False) + ohe.fit(X) + X_tr = ohe.transform(X) + feature_names = ohe.get_feature_names_out() + ohe.set_params(drop=reset_drop) + assert_array_equal(ohe.inverse_transform(X_tr), X) + assert_allclose(ohe.transform(X), X_tr) + assert_array_equal(ohe.get_feature_names_out(), feature_names) + + +@pytest.mark.parametrize("method", ["fit", "fit_transform"]) +@pytest.mark.parametrize("X", [[1, 2], np.array([3.0, 4.0])]) +def test_X_is_not_1D(X, method): + oh = OneHotEncoder() + + msg = "Expected 2D array, got 1D array instead" + with pytest.raises(ValueError, match=msg): + getattr(oh, method)(X) + + +@pytest.mark.parametrize("method", ["fit", "fit_transform"]) +def test_X_is_not_1D_pandas(method): + pd = pytest.importorskip("pandas") + X = pd.Series([6, 3, 4, 6]) + oh = OneHotEncoder() + + msg = f"Expected a 2-dimensional container but got {type(X)} instead." + with pytest.raises(ValueError, match=msg): + getattr(oh, method)(X) + + +@pytest.mark.parametrize( + "X, cat_exp, cat_dtype", + [ + ([["abc", 55], ["def", 55]], [["abc", "def"], [55]], np.object_), + (np.array([[1, 2], [3, 2]]), [[1, 3], [2]], np.integer), + ( + np.array([["A", "cat"], ["B", "cat"]], dtype=object), + [["A", "B"], ["cat"]], + np.object_, + ), + (np.array([["A", "cat"], ["B", "cat"]]), [["A", "B"], ["cat"]], np.str_), + (np.array([[1, 2], [np.nan, 2]]), [[1, np.nan], [2]], np.float64), + ( + np.array([["A", np.nan], [None, np.nan]], dtype=object), + [["A", None], [np.nan]], + np.object_, + ), + ( + np.array([["A", float("nan")], [None, float("nan")]], dtype=object), + [["A", None], [float("nan")]], + np.object_, + ), + ], + ids=[ + "mixed", + "numeric", + "object", + "string", + "missing-float", + "missing-np.nan-object", + "missing-float-nan-object", + ], +) +def test_one_hot_encoder_categories(X, cat_exp, cat_dtype): + # order of categories should not depend on order of samples + for Xi in [X, X[::-1]]: + enc = OneHotEncoder(categories="auto") + enc.fit(Xi) + # assert enc.categories == 'auto' + assert isinstance(enc.categories_, list) + for res, exp in zip(enc.categories_, cat_exp): + res_list = res.tolist() + if is_scalar_nan(exp[-1]): + assert is_scalar_nan(res_list[-1]) + assert res_list[:-1] == exp[:-1] + else: + assert res.tolist() == exp + assert np.issubdtype(res.dtype, cat_dtype) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +@pytest.mark.parametrize( + "X, X2, cats, cat_dtype", + [ + ( + np.array([["a", "b"]], dtype=object).T, + np.array([["a", "d"]], dtype=object).T, + [["a", "b", "c"]], + np.object_, + ), + ( + np.array([[1, 2]], dtype="int64").T, + np.array([[1, 4]], dtype="int64").T, + [[1, 2, 3]], + np.int64, + ), + ( + np.array([["a", "b"]], dtype=object).T, + np.array([["a", "d"]], 
dtype=object).T, + [np.array(["a", "b", "c"])], + np.object_, + ), + ( + np.array([[None, "a"]], dtype=object).T, + np.array([[None, "b"]], dtype=object).T, + [[None, "a", "z"]], + object, + ), + ( + np.array([["a", "b"]], dtype=object).T, + np.array([["a", np.nan]], dtype=object).T, + [["a", "b", "z"]], + object, + ), + ( + np.array([["a", None]], dtype=object).T, + np.array([["a", np.nan]], dtype=object).T, + [["a", None, "z"]], + object, + ), + ], + ids=[ + "object", + "numeric", + "object-string", + "object-string-none", + "object-string-nan", + "object-None-and-nan", + ], +) +def test_one_hot_encoder_specified_categories(X, X2, cats, cat_dtype, handle_unknown): + enc = OneHotEncoder(categories=cats) + exp = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]) + assert_array_equal(enc.fit_transform(X).toarray(), exp) + assert list(enc.categories[0]) == list(cats[0]) + assert enc.categories_[0].tolist() == list(cats[0]) + # manually specified categories should have same dtype as + # the data when coerced from lists + assert enc.categories_[0].dtype == cat_dtype + + # when specifying categories manually, unknown categories should already + # raise when fitting + enc = OneHotEncoder(categories=cats) + with pytest.raises(ValueError, match="Found unknown categories"): + enc.fit(X2) + enc = OneHotEncoder(categories=cats, handle_unknown=handle_unknown) + exp = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) + assert_array_equal(enc.fit(X2).transform(X2).toarray(), exp) + + +def test_one_hot_encoder_unsorted_categories(): + X = np.array([["a", "b"]], dtype=object).T + + enc = OneHotEncoder(categories=[["b", "a", "c"]]) + exp = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]) + assert_array_equal(enc.fit(X).transform(X).toarray(), exp) + assert_array_equal(enc.fit_transform(X).toarray(), exp) + assert enc.categories_[0].tolist() == ["b", "a", "c"] + assert np.issubdtype(enc.categories_[0].dtype, np.object_) + + # unsorted passed categories still raise for numerical values + X = np.array([[1, 2]]).T + enc = OneHotEncoder(categories=[[2, 1, 3]]) + msg = "Unsorted categories are not supported" + with pytest.raises(ValueError, match=msg): + enc.fit_transform(X) + + +@pytest.mark.parametrize("Encoder", [OneHotEncoder, OrdinalEncoder]) +def test_encoder_nan_ending_specified_categories(Encoder): + """Test encoder for specified categories that nan is at the end. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27088 + """ + cats = [np.array([0, np.nan, 1])] + enc = Encoder(categories=cats) + X = np.array([[0, 1]], dtype=object).T + with pytest.raises(ValueError, match="Nan should be the last element"): + enc.fit(X) + + +def test_one_hot_encoder_specified_categories_mixed_columns(): + # multiple columns + X = np.array([["a", "b"], [0, 2]], dtype=object).T + enc = OneHotEncoder(categories=[["a", "b", "c"], [0, 1, 2]]) + exp = np.array([[1.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 1.0]]) + assert_array_equal(enc.fit_transform(X).toarray(), exp) + assert enc.categories_[0].tolist() == ["a", "b", "c"] + assert np.issubdtype(enc.categories_[0].dtype, np.object_) + assert enc.categories_[1].tolist() == [0, 1, 2] + # integer categories but from object dtype data + assert np.issubdtype(enc.categories_[1].dtype, np.object_) + + +def test_one_hot_encoder_pandas(): + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"A": ["a", "b"], "B": [1, 2]}) + + Xtr = check_categorical_onehot(X_df) + assert_allclose(Xtr, [[1, 0, 1, 0], [0, 1, 0, 1]]) + + +@pytest.mark.parametrize( + "drop, expected_names", + [ + ("first", ["x0_c", "x2_b"]), + ("if_binary", ["x0_c", "x1_2", "x2_b"]), + (["c", 2, "b"], ["x0_b", "x2_a"]), + ], + ids=["first", "binary", "manual"], +) +def test_one_hot_encoder_feature_names_drop(drop, expected_names): + X = [["c", 2, "a"], ["b", 2, "b"]] + + ohe = OneHotEncoder(drop=drop) + ohe.fit(X) + feature_names = ohe.get_feature_names_out() + assert_array_equal(expected_names, feature_names) + + +def test_one_hot_encoder_drop_equals_if_binary(): + # Canonical case + X = [[10, "yes"], [20, "no"], [30, "yes"]] + expected = np.array( + [[1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0]] + ) + expected_drop_idx = np.array([None, 0]) + + ohe = OneHotEncoder(drop="if_binary", sparse_output=False) + result = ohe.fit_transform(X) + assert_array_equal(ohe.drop_idx_, expected_drop_idx) + assert_allclose(result, expected) + + # with only one cat, the behaviour is equivalent to drop=None + X = [["true", "a"], ["false", "a"], ["false", "a"]] + expected = np.array([[1.0, 1.0], [0.0, 1.0], [0.0, 1.0]]) + expected_drop_idx = np.array([0, None]) + + ohe = OneHotEncoder(drop="if_binary", sparse_output=False) + result = ohe.fit_transform(X) + assert_array_equal(ohe.drop_idx_, expected_drop_idx) + assert_allclose(result, expected) + + +@pytest.mark.parametrize( + "X", + [ + [["abc", 2, 55], ["def", 1, 55]], + np.array([[10, 2, 55], [20, 1, 55]]), + np.array([["a", "B", "cat"], ["b", "A", "cat"]], dtype=object), + ], + ids=["mixed", "numeric", "object"], +) +def test_ordinal_encoder(X): + enc = OrdinalEncoder() + exp = np.array([[0, 1, 0], [1, 0, 0]], dtype="int64") + assert_array_equal(enc.fit_transform(X), exp.astype("float64")) + enc = OrdinalEncoder(dtype="int64") + assert_array_equal(enc.fit_transform(X), exp) + + +@pytest.mark.parametrize( + "X, X2, cats, cat_dtype", + [ + ( + np.array([["a", "b"]], dtype=object).T, + np.array([["a", "d"]], dtype=object).T, + [["a", "b", "c"]], + np.object_, + ), + ( + np.array([[1, 2]], dtype="int64").T, + np.array([[1, 4]], dtype="int64").T, + [[1, 2, 3]], + np.int64, + ), + ( + np.array([["a", "b"]], dtype=object).T, + np.array([["a", "d"]], dtype=object).T, + [np.array(["a", "b", "c"])], + np.object_, + ), + ], + ids=["object", "numeric", "object-string-cat"], +) +def test_ordinal_encoder_specified_categories(X, X2, cats, cat_dtype): + enc = 
OrdinalEncoder(categories=cats) + exp = np.array([[0.0], [1.0]]) + assert_array_equal(enc.fit_transform(X), exp) + assert list(enc.categories[0]) == list(cats[0]) + assert enc.categories_[0].tolist() == list(cats[0]) + # manually specified categories should have same dtype as + # the data when coerced from lists + assert enc.categories_[0].dtype == cat_dtype + + # when specifying categories manually, unknown categories should already + # raise when fitting + enc = OrdinalEncoder(categories=cats) + with pytest.raises(ValueError, match="Found unknown categories"): + enc.fit(X2) + + +def test_ordinal_encoder_inverse(): + X = [["abc", 2, 55], ["def", 1, 55]] + enc = OrdinalEncoder() + X_tr = enc.fit_transform(X) + exp = np.array(X, dtype=object) + assert_array_equal(enc.inverse_transform(X_tr), exp) + + # incorrect shape raises + X_tr = np.array([[0, 1, 1, 2], [1, 0, 1, 0]]) + msg = re.escape("Shape of the passed X data is not correct") + with pytest.raises(ValueError, match=msg): + enc.inverse_transform(X_tr) + + +def test_ordinal_encoder_handle_unknowns_string(): + enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-2) + X_fit = np.array([["a", "x"], ["b", "y"], ["c", "z"]], dtype=object) + X_trans = np.array([["c", "xy"], ["bla", "y"], ["a", "x"]], dtype=object) + enc.fit(X_fit) + + X_trans_enc = enc.transform(X_trans) + exp = np.array([[2, -2], [-2, 1], [0, 0]], dtype="int64") + assert_array_equal(X_trans_enc, exp) + + X_trans_inv = enc.inverse_transform(X_trans_enc) + inv_exp = np.array([["c", None], [None, "y"], ["a", "x"]], dtype=object) + assert_array_equal(X_trans_inv, inv_exp) + + +@pytest.mark.parametrize("dtype", [float, int]) +def test_ordinal_encoder_handle_unknowns_numeric(dtype): + enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-999) + X_fit = np.array([[1, 7], [2, 8], [3, 9]], dtype=dtype) + X_trans = np.array([[3, 12], [23, 8], [1, 7]], dtype=dtype) + enc.fit(X_fit) + + X_trans_enc = enc.transform(X_trans) + exp = np.array([[2, -999], [-999, 1], [0, 0]], dtype="int64") + assert_array_equal(X_trans_enc, exp) + + X_trans_inv = enc.inverse_transform(X_trans_enc) + inv_exp = np.array([[3, None], [None, 8], [1, 7]], dtype=object) + assert_array_equal(X_trans_inv, inv_exp) + + +def test_ordinal_encoder_handle_unknowns_nan(): + # Make sure unknown_value=np.nan properly works + + enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=np.nan) + + X_fit = np.array([[1], [2], [3]]) + enc.fit(X_fit) + X_trans = enc.transform([[1], [2], [4]]) + assert_array_equal(X_trans, [[0], [1], [np.nan]]) + + +def test_ordinal_encoder_handle_unknowns_nan_non_float_dtype(): + # Make sure an error is raised when unknown_value=np.nan and the dtype + # isn't a float dtype + enc = OrdinalEncoder( + handle_unknown="use_encoded_value", unknown_value=np.nan, dtype=int + ) + + X_fit = np.array([[1], [2], [3]]) + with pytest.raises(ValueError, match="dtype parameter should be a float dtype"): + enc.fit(X_fit) + + +def test_ordinal_encoder_raise_categories_shape(): + X = np.array([["Low", "Medium", "High", "Medium", "Low"]], dtype=object).T + cats = ["Low", "Medium", "High"] + enc = OrdinalEncoder(categories=cats) + msg = "Shape mismatch: if categories is an array," + + with pytest.raises(ValueError, match=msg): + enc.fit(X) + + +def test_encoder_dtypes(): + # check that dtypes are preserved when determining categories + enc = OneHotEncoder(categories="auto") + exp = np.array([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]], dtype="float64") + + for X in [ 
+ np.array([[1, 2], [3, 4]], dtype="int64"), + np.array([[1, 2], [3, 4]], dtype="float64"), + np.array([["a", "b"], ["c", "d"]]), # str dtype + np.array([[b"a", b"b"], [b"c", b"d"]]), # bytes dtype + np.array([[1, "a"], [3, "b"]], dtype="object"), + ]: + enc.fit(X) + assert all([enc.categories_[i].dtype == X.dtype for i in range(2)]) + assert_array_equal(enc.transform(X).toarray(), exp) + + X = [[1, 2], [3, 4]] + enc.fit(X) + assert all([np.issubdtype(enc.categories_[i].dtype, np.integer) for i in range(2)]) + assert_array_equal(enc.transform(X).toarray(), exp) + + X = [[1, "a"], [3, "b"]] + enc.fit(X) + assert all([enc.categories_[i].dtype == "object" for i in range(2)]) + assert_array_equal(enc.transform(X).toarray(), exp) + + +def test_encoder_dtypes_pandas(): + # check dtype (similar to test_categorical_encoder_dtypes for dataframes) + pd = pytest.importorskip("pandas") + + enc = OneHotEncoder(categories="auto") + exp = np.array( + [[1.0, 0.0, 1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 0.0, 1.0]], + dtype="float64", + ) + + X = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}, dtype="int64") + enc.fit(X) + assert all([enc.categories_[i].dtype == "int64" for i in range(2)]) + assert_array_equal(enc.transform(X).toarray(), exp) + + X = pd.DataFrame({"A": [1, 2], "B": ["a", "b"], "C": [3.0, 4.0]}) + X_type = [X["A"].dtype, X["B"].dtype, X["C"].dtype] + enc.fit(X) + assert all([enc.categories_[i].dtype == X_type[i] for i in range(3)]) + assert_array_equal(enc.transform(X).toarray(), exp) + + +def test_one_hot_encoder_warning(): + enc = OneHotEncoder() + X = [["Male", 1], ["Female", 3]] + np.testing.assert_no_warnings(enc.fit_transform, X) + + +@pytest.mark.parametrize("missing_value", [np.nan, None, float("nan")]) +def test_one_hot_encoder_drop_manual(missing_value): + cats_to_drop = ["def", 12, 3, 56, missing_value] + enc = OneHotEncoder(drop=cats_to_drop) + X = [ + ["abc", 12, 2, 55, "a"], + ["def", 12, 1, 55, "a"], + ["def", 12, 3, 56, missing_value], + ] + trans = enc.fit_transform(X).toarray() + exp = [[1, 0, 1, 1, 1], [0, 1, 0, 1, 1], [0, 0, 0, 0, 0]] + assert_array_equal(trans, exp) + assert enc.drop is cats_to_drop + + dropped_cats = [ + cat[feature] for cat, feature in zip(enc.categories_, enc.drop_idx_) + ] + X_inv_trans = enc.inverse_transform(trans) + X_array = np.array(X, dtype=object) + + # last value is np.nan + if is_scalar_nan(cats_to_drop[-1]): + assert_array_equal(dropped_cats[:-1], cats_to_drop[:-1]) + assert is_scalar_nan(dropped_cats[-1]) + assert is_scalar_nan(cats_to_drop[-1]) + # do not include the last column which includes missing values + assert_array_equal(X_array[:, :-1], X_inv_trans[:, :-1]) + + # check last column is the missing value + assert_array_equal(X_array[-1, :-1], X_inv_trans[-1, :-1]) + assert is_scalar_nan(X_array[-1, -1]) + assert is_scalar_nan(X_inv_trans[-1, -1]) + else: + assert_array_equal(dropped_cats, cats_to_drop) + assert_array_equal(X_array, X_inv_trans) + + +@pytest.mark.parametrize("drop", [["abc", 3], ["abc", 3, 41, "a"]]) +def test_invalid_drop_length(drop): + enc = OneHotEncoder(drop=drop) + err_msg = "`drop` should have length equal to the number" + with pytest.raises(ValueError, match=err_msg): + enc.fit([["abc", 2, 55], ["def", 1, 55], ["def", 3, 59]]) + + +@pytest.mark.parametrize("density", [True, False], ids=["sparse", "dense"]) +@pytest.mark.parametrize("drop", ["first", ["a", 2, "b"]], ids=["first", "manual"]) +def test_categories(density, drop): + ohe_base = OneHotEncoder(sparse_output=density) + ohe_test = 
OneHotEncoder(sparse_output=density, drop=drop) + X = [["c", 1, "a"], ["a", 2, "b"]] + ohe_base.fit(X) + ohe_test.fit(X) + assert_array_equal(ohe_base.categories_, ohe_test.categories_) + if drop == "first": + assert_array_equal(ohe_test.drop_idx_, 0) + else: + for drop_cat, drop_idx, cat_list in zip( + drop, ohe_test.drop_idx_, ohe_test.categories_ + ): + assert cat_list[int(drop_idx)] == drop_cat + assert isinstance(ohe_test.drop_idx_, np.ndarray) + assert ohe_test.drop_idx_.dtype == object + + +@pytest.mark.parametrize("Encoder", [OneHotEncoder, OrdinalEncoder]) +def test_encoders_has_categorical_tags(Encoder): + assert "categorical" in Encoder()._get_tags()["X_types"] + + +@pytest.mark.parametrize( + "kwargs", + [ + {"max_categories": 2}, + {"min_frequency": 11}, + {"min_frequency": 0.29}, + {"max_categories": 2, "min_frequency": 6}, + {"max_categories": 4, "min_frequency": 12}, + ], +) +@pytest.mark.parametrize("categories", ["auto", [["a", "b", "c", "d"]]]) +def test_ohe_infrequent_two_levels(kwargs, categories): + """Test that different parameters for combine 'a', 'c', and 'd' into + the infrequent category works as expected.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + categories=categories, + handle_unknown="infrequent_if_exist", + sparse_output=False, + **kwargs, + ).fit(X_train) + assert_array_equal(ohe.infrequent_categories_, [["a", "c", "d"]]) + + X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] + expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + expected_inv = [[col] for col in ["b"] + ["infrequent_sklearn"] * 4] + X_inv = ohe.inverse_transform(X_trans) + assert_array_equal(expected_inv, X_inv) + + feature_names = ohe.get_feature_names_out() + assert_array_equal(["x0_b", "x0_infrequent_sklearn"], feature_names) + + +@pytest.mark.parametrize("drop", ["if_binary", "first", ["b"]]) +def test_ohe_infrequent_two_levels_drop_frequent(drop): + """Test two levels and dropping the frequent category.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", + sparse_output=False, + max_categories=2, + drop=drop, + ).fit(X_train) + assert ohe.categories_[0][ohe.drop_idx_[0]] == "b" + + X_test = np.array([["b"], ["c"]]) + X_trans = ohe.transform(X_test) + assert_allclose([[0], [1]], X_trans) + + feature_names = ohe.get_feature_names_out() + assert_array_equal(["x0_infrequent_sklearn"], feature_names) + + X_inverse = ohe.inverse_transform(X_trans) + assert_array_equal([["b"], ["infrequent_sklearn"]], X_inverse) + + +@pytest.mark.parametrize("drop", [["a"], ["d"]]) +def test_ohe_infrequent_two_levels_drop_infrequent_errors(drop): + """Test two levels and dropping any infrequent category removes the + whole infrequent category.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", + sparse_output=False, + max_categories=2, + drop=drop, + ) + + msg = f"Unable to drop category {drop[0]!r} from feature 0 because it is infrequent" + with pytest.raises(ValueError, match=msg): + ohe.fit(X_train) + + +@pytest.mark.parametrize( + "kwargs", + [ + {"max_categories": 3}, + {"min_frequency": 6}, + {"min_frequency": 9}, + {"min_frequency": 0.24}, + {"min_frequency": 0.16}, + {"max_categories": 3, "min_frequency": 8}, + {"max_categories": 4, "min_frequency": 6}, + ], +) +def 
test_ohe_infrequent_three_levels(kwargs): + """Test that different parameters for combing 'a', and 'd' into + the infrequent category works as expected.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", sparse_output=False, **kwargs + ).fit(X_train) + assert_array_equal(ohe.infrequent_categories_, [["a", "d"]]) + + X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] + expected = np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + expected_inv = [ + ["b"], + ["infrequent_sklearn"], + ["c"], + ["infrequent_sklearn"], + ["infrequent_sklearn"], + ] + X_inv = ohe.inverse_transform(X_trans) + assert_array_equal(expected_inv, X_inv) + + feature_names = ohe.get_feature_names_out() + assert_array_equal(["x0_b", "x0_c", "x0_infrequent_sklearn"], feature_names) + + +@pytest.mark.parametrize("drop", ["first", ["b"]]) +def test_ohe_infrequent_three_levels_drop_frequent(drop): + """Test three levels and dropping the frequent category.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", + sparse_output=False, + max_categories=3, + drop=drop, + ).fit(X_train) + + X_test = np.array([["b"], ["c"], ["d"]]) + assert_allclose([[0, 0], [1, 0], [0, 1]], ohe.transform(X_test)) + + # Check handle_unknown="ignore" + ohe.set_params(handle_unknown="ignore").fit(X_train) + msg = "Found unknown categories" + with pytest.warns(UserWarning, match=msg): + X_trans = ohe.transform([["b"], ["e"]]) + + assert_allclose([[0, 0], [0, 0]], X_trans) + + +@pytest.mark.parametrize("drop", [["a"], ["d"]]) +def test_ohe_infrequent_three_levels_drop_infrequent_errors(drop): + """Test three levels and dropping the infrequent category.""" + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", + sparse_output=False, + max_categories=3, + drop=drop, + ) + + msg = f"Unable to drop category {drop[0]!r} from feature 0 because it is infrequent" + with pytest.raises(ValueError, match=msg): + ohe.fit(X_train) + + +def test_ohe_infrequent_handle_unknown_error(): + """Test that different parameters for combining 'a', and 'd' into + the infrequent category works as expected.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="error", sparse_output=False, max_categories=3 + ).fit(X_train) + assert_array_equal(ohe.infrequent_categories_, [["a", "d"]]) + + # all categories are known + X_test = [["b"], ["a"], ["c"], ["d"]] + expected = np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + # 'bad' is not known and will error + X_test = [["bad"]] + msg = r"Found unknown categories \['bad'\] in column 0" + with pytest.raises(ValueError, match=msg): + ohe.transform(X_test) + + +@pytest.mark.parametrize( + "kwargs", [{"max_categories": 3, "min_frequency": 1}, {"min_frequency": 4}] +) +def test_ohe_infrequent_two_levels_user_cats_one_frequent(kwargs): + """'a' is the only frequent category, all other categories are infrequent.""" + + X_train = np.array([["a"] * 5 + ["e"] * 30], dtype=object).T + ohe = OneHotEncoder( + categories=[["c", "d", "a", "b"]], + sparse_output=False, + handle_unknown="infrequent_if_exist", + **kwargs, + ).fit(X_train) + + X_test = [["a"], ["b"], 
["c"], ["d"], ["e"]] + expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + # 'a' is dropped + drops = ["first", "if_binary", ["a"]] + X_test = [["a"], ["c"]] + for drop in drops: + ohe.set_params(drop=drop).fit(X_train) + assert_allclose([[0], [1]], ohe.transform(X_test)) + + +def test_ohe_infrequent_two_levels_user_cats(): + """Test that the order of the categories provided by a user is respected.""" + X_train = np.array( + [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object + ).T + ohe = OneHotEncoder( + categories=[["c", "d", "a", "b"]], + sparse_output=False, + handle_unknown="infrequent_if_exist", + max_categories=2, + ).fit(X_train) + + assert_array_equal(ohe.infrequent_categories_, [["c", "d", "a"]]) + + X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] + expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + # 'infrequent' is used to denote the infrequent categories for + # `inverse_transform` + expected_inv = [[col] for col in ["b"] + ["infrequent_sklearn"] * 4] + X_inv = ohe.inverse_transform(X_trans) + assert_array_equal(expected_inv, X_inv) + + +def test_ohe_infrequent_three_levels_user_cats(): + """Test that the order of the categories provided by a user is respected. + In this case 'c' is encoded as the first category and 'b' is encoded + as the second one.""" + + X_train = np.array( + [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object + ).T + ohe = OneHotEncoder( + categories=[["c", "d", "b", "a"]], + sparse_output=False, + handle_unknown="infrequent_if_exist", + max_categories=3, + ).fit(X_train) + + assert_array_equal(ohe.infrequent_categories_, [["d", "a"]]) + + X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] + expected = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 0, 1], [0, 0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + # 'infrequent' is used to denote the infrequent categories for + # `inverse_transform` + expected_inv = [ + ["b"], + ["infrequent_sklearn"], + ["c"], + ["infrequent_sklearn"], + ["infrequent_sklearn"], + ] + X_inv = ohe.inverse_transform(X_trans) + assert_array_equal(expected_inv, X_inv) + + +def test_ohe_infrequent_mixed(): + """Test infrequent categories where feature 0 has infrequent categories, + and feature 1 does not.""" + + # X[:, 0] 1 and 2 are infrequent + # X[:, 1] nothing is infrequent + X = np.c_[[0, 1, 3, 3, 3, 3, 2, 0, 3], [0, 0, 0, 0, 1, 1, 1, 1, 1]] + + ohe = OneHotEncoder(max_categories=3, drop="if_binary", sparse_output=False) + ohe.fit(X) + + X_test = [[3, 0], [1, 1]] + X_trans = ohe.transform(X_test) + + # feature 1 is binary so it drops a category 0 + assert_allclose(X_trans, [[0, 1, 0, 0], [0, 0, 1, 1]]) + + +def test_ohe_infrequent_multiple_categories(): + """Test infrequent categories with feature matrix with 3 features.""" + + X = np.c_[ + [0, 1, 3, 3, 3, 3, 2, 0, 3], + [0, 0, 5, 1, 1, 10, 5, 5, 0], + [1, 0, 1, 0, 1, 0, 1, 0, 1], + ] + + ohe = OneHotEncoder( + categories="auto", max_categories=3, handle_unknown="infrequent_if_exist" + ) + # X[:, 0] 1 and 2 are infrequent + # X[:, 1] 1 and 10 are infrequent + # X[:, 2] nothing is infrequent + + X_trans = ohe.fit_transform(X).toarray() + assert_array_equal(ohe.infrequent_categories_[0], [1, 2]) + assert_array_equal(ohe.infrequent_categories_[1], [1, 10]) + assert_array_equal(ohe.infrequent_categories_[2], None) + + # 'infrequent' is used to denote the 
infrequent categories + # For the first column, 1 and 2 have the same frequency. In this case, + # 1 will be chosen to be the feature name because is smaller lexiconically + feature_names = ohe.get_feature_names_out() + assert_array_equal( + [ + "x0_0", + "x0_3", + "x0_infrequent_sklearn", + "x1_0", + "x1_5", + "x1_infrequent_sklearn", + "x2_0", + "x2_1", + ], + feature_names, + ) + + expected = [ + [1, 0, 0, 1, 0, 0, 0, 1], + [0, 0, 1, 1, 0, 0, 1, 0], + [0, 1, 0, 0, 1, 0, 0, 1], + [0, 1, 0, 0, 0, 1, 1, 0], + [0, 1, 0, 0, 0, 1, 0, 1], + [0, 1, 0, 0, 0, 1, 1, 0], + [0, 0, 1, 0, 1, 0, 0, 1], + [1, 0, 0, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 0, 0, 1], + ] + + assert_allclose(expected, X_trans) + + X_test = [[3, 1, 2], [4, 0, 3]] + + X_test_trans = ohe.transform(X_test) + + # X[:, 2] does not have an infrequent category, thus it is encoded as all + # zeros + expected = [[0, 1, 0, 0, 0, 1, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0]] + assert_allclose(expected, X_test_trans.toarray()) + + X_inv = ohe.inverse_transform(X_test_trans) + expected_inv = np.array( + [[3, "infrequent_sklearn", None], ["infrequent_sklearn", 0, None]], dtype=object + ) + assert_array_equal(expected_inv, X_inv) + + # error for unknown categories + ohe = OneHotEncoder( + categories="auto", max_categories=3, handle_unknown="error" + ).fit(X) + with pytest.raises(ValueError, match="Found unknown categories"): + ohe.transform(X_test) + + # only infrequent or known categories + X_test = [[1, 1, 1], [3, 10, 0]] + X_test_trans = ohe.transform(X_test) + + expected = [[0, 0, 1, 0, 0, 1, 0, 1], [0, 1, 0, 0, 0, 1, 1, 0]] + assert_allclose(expected, X_test_trans.toarray()) + + X_inv = ohe.inverse_transform(X_test_trans) + + expected_inv = np.array( + [["infrequent_sklearn", "infrequent_sklearn", 1], [3, "infrequent_sklearn", 0]], + dtype=object, + ) + assert_array_equal(expected_inv, X_inv) + + +def test_ohe_infrequent_multiple_categories_dtypes(): + """Test infrequent categories with a pandas dataframe with multiple dtypes.""" + + pd = pytest.importorskip("pandas") + X = pd.DataFrame( + { + "str": ["a", "f", "c", "f", "f", "a", "c", "b", "b"], + "int": [5, 3, 0, 10, 10, 12, 0, 3, 5], + }, + columns=["str", "int"], + ) + + ohe = OneHotEncoder( + categories="auto", max_categories=3, handle_unknown="infrequent_if_exist" + ) + # X[:, 0] 'a', 'b', 'c' have the same frequency. 'a' and 'b' will be + # considered infrequent because they are greater + + # X[:, 1] 0, 3, 5, 10 has frequency 2 and 12 has frequency 1. 
+ # 0, 3, 12 will be considered infrequent + + X_trans = ohe.fit_transform(X).toarray() + assert_array_equal(ohe.infrequent_categories_[0], ["a", "b"]) + assert_array_equal(ohe.infrequent_categories_[1], [0, 3, 12]) + + expected = [ + [0, 0, 1, 1, 0, 0], + [0, 1, 0, 0, 0, 1], + [1, 0, 0, 0, 0, 1], + [0, 1, 0, 0, 1, 0], + [0, 1, 0, 0, 1, 0], + [0, 0, 1, 0, 0, 1], + [1, 0, 0, 0, 0, 1], + [0, 0, 1, 0, 0, 1], + [0, 0, 1, 1, 0, 0], + ] + + assert_allclose(expected, X_trans) + + X_test = pd.DataFrame({"str": ["b", "f"], "int": [14, 12]}, columns=["str", "int"]) + + expected = [[0, 0, 1, 0, 0, 1], [0, 1, 0, 0, 0, 1]] + X_test_trans = ohe.transform(X_test) + assert_allclose(expected, X_test_trans.toarray()) + + X_inv = ohe.inverse_transform(X_test_trans) + expected_inv = np.array( + [["infrequent_sklearn", "infrequent_sklearn"], ["f", "infrequent_sklearn"]], + dtype=object, + ) + assert_array_equal(expected_inv, X_inv) + + # only infrequent or known categories + X_test = pd.DataFrame({"str": ["c", "b"], "int": [12, 5]}, columns=["str", "int"]) + X_test_trans = ohe.transform(X_test).toarray() + expected = [[1, 0, 0, 0, 0, 1], [0, 0, 1, 1, 0, 0]] + assert_allclose(expected, X_test_trans) + + X_inv = ohe.inverse_transform(X_test_trans) + expected_inv = np.array( + [["c", "infrequent_sklearn"], ["infrequent_sklearn", 5]], dtype=object + ) + assert_array_equal(expected_inv, X_inv) + + +@pytest.mark.parametrize("kwargs", [{"min_frequency": 21, "max_categories": 1}]) +def test_ohe_infrequent_one_level_errors(kwargs): + """All user provided categories are infrequent.""" + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 2]).T + + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", sparse_output=False, **kwargs + ) + ohe.fit(X_train) + + X_trans = ohe.transform([["a"]]) + assert_allclose(X_trans, [[1]]) + + +@pytest.mark.parametrize("kwargs", [{"min_frequency": 2, "max_categories": 3}]) +def test_ohe_infrequent_user_cats_unknown_training_errors(kwargs): + """All user provided categories are infrequent.""" + + X_train = np.array([["e"] * 3], dtype=object).T + ohe = OneHotEncoder( + categories=[["c", "d", "a", "b"]], + sparse_output=False, + handle_unknown="infrequent_if_exist", + **kwargs, + ).fit(X_train) + + X_trans = ohe.transform([["a"], ["e"]]) + assert_allclose(X_trans, [[1], [1]]) + + +# deliberately omit 'OS' as an invalid combo +@pytest.mark.parametrize( + "input_dtype, category_dtype", ["OO", "OU", "UO", "UU", "SO", "SU", "SS"] +) +@pytest.mark.parametrize("array_type", ["list", "array", "dataframe"]) +def test_encoders_string_categories(input_dtype, category_dtype, array_type): + """Check that encoding work with object, unicode, and byte string dtypes. 
+ Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/15616 + https://github.com/scikit-learn/scikit-learn/issues/15726 + https://github.com/scikit-learn/scikit-learn/issues/19677 + """ + + X = np.array([["b"], ["a"]], dtype=input_dtype) + categories = [np.array(["b", "a"], dtype=category_dtype)] + ohe = OneHotEncoder(categories=categories, sparse_output=False).fit(X) + + X_test = _convert_container( + [["a"], ["a"], ["b"], ["a"]], array_type, dtype=input_dtype + ) + X_trans = ohe.transform(X_test) + + expected = np.array([[0, 1], [0, 1], [1, 0], [0, 1]]) + assert_allclose(X_trans, expected) + + oe = OrdinalEncoder(categories=categories).fit(X) + X_trans = oe.transform(X_test) + + expected = np.array([[1], [1], [0], [1]]) + assert_array_equal(X_trans, expected) + + +def test_mixed_string_bytes_categoricals(): + """Check that this mixture of predefined categories and X raises an error. + + Categories defined as bytes can not easily be compared to data that is + a string. + """ + # data as unicode + X = np.array([["b"], ["a"]], dtype="U") + # predefined categories as bytes + categories = [np.array(["b", "a"], dtype="S")] + ohe = OneHotEncoder(categories=categories, sparse_output=False) + + msg = re.escape( + "In column 0, the predefined categories have type 'bytes' which is incompatible" + " with values of type 'str_'." + ) + + with pytest.raises(ValueError, match=msg): + ohe.fit(X) + + +@pytest.mark.parametrize("missing_value", [np.nan, None]) +def test_ohe_missing_values_get_feature_names(missing_value): + # encoder with missing values with object dtypes + X = np.array([["a", "b", missing_value, "a", missing_value]], dtype=object).T + ohe = OneHotEncoder(sparse_output=False, handle_unknown="ignore").fit(X) + names = ohe.get_feature_names_out() + assert_array_equal(names, ["x0_a", "x0_b", f"x0_{missing_value}"]) + + +def test_ohe_missing_value_support_pandas(): + # check support for pandas with mixed dtypes and missing values + pd = pytest.importorskip("pandas") + df = pd.DataFrame( + { + "col1": ["dog", "cat", None, "cat"], + "col2": np.array([3, 0, 4, np.nan], dtype=float), + }, + columns=["col1", "col2"], + ) + expected_df_trans = np.array( + [ + [0, 1, 0, 0, 1, 0, 0], + [1, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0], + [1, 0, 0, 0, 0, 0, 1], + ] + ) + + Xtr = check_categorical_onehot(df) + assert_allclose(Xtr, expected_df_trans) + + +@pytest.mark.parametrize("handle_unknown", ["infrequent_if_exist", "ignore"]) +@pytest.mark.parametrize("pd_nan_type", ["pd.NA", "np.nan"]) +def test_ohe_missing_value_support_pandas_categorical(pd_nan_type, handle_unknown): + # checks pandas dataframe with categorical features + pd = pytest.importorskip("pandas") + + pd_missing_value = pd.NA if pd_nan_type == "pd.NA" else np.nan + + df = pd.DataFrame( + { + "col1": pd.Series(["c", "a", pd_missing_value, "b", "a"], dtype="category"), + } + ) + expected_df_trans = np.array( + [ + [0, 0, 1, 0], + [1, 0, 0, 0], + [0, 0, 0, 1], + [0, 1, 0, 0], + [1, 0, 0, 0], + ] + ) + + ohe = OneHotEncoder(sparse_output=False, handle_unknown=handle_unknown) + df_trans = ohe.fit_transform(df) + assert_allclose(expected_df_trans, df_trans) + + assert len(ohe.categories_) == 1 + assert_array_equal(ohe.categories_[0][:-1], ["a", "b", "c"]) + assert np.isnan(ohe.categories_[0][-1]) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +def test_ohe_drop_first_handle_unknown_ignore_warns(handle_unknown): + """Check drop='first' and handle_unknown='ignore'/'infrequent_if_exist' 
+ during transform.""" + X = [["a", 0], ["b", 2], ["b", 1]] + + ohe = OneHotEncoder( + drop="first", sparse_output=False, handle_unknown=handle_unknown + ) + X_trans = ohe.fit_transform(X) + + X_expected = np.array( + [ + [0, 0, 0], + [1, 0, 1], + [1, 1, 0], + ] + ) + assert_allclose(X_trans, X_expected) + + # Both categories are unknown + X_test = [["c", 3]] + X_expected = np.array([[0, 0, 0]]) + + warn_msg = ( + r"Found unknown categories in columns \[0, 1\] during " + "transform. These unknown categories will be encoded as all " + "zeros" + ) + with pytest.warns(UserWarning, match=warn_msg): + X_trans = ohe.transform(X_test) + assert_allclose(X_trans, X_expected) + + # inverse_transform maps to None + X_inv = ohe.inverse_transform(X_expected) + assert_array_equal(X_inv, np.array([["a", 0]], dtype=object)) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +def test_ohe_drop_if_binary_handle_unknown_ignore_warns(handle_unknown): + """Check drop='if_binary' and handle_unknown='ignore' during transform.""" + X = [["a", 0], ["b", 2], ["b", 1]] + + ohe = OneHotEncoder( + drop="if_binary", sparse_output=False, handle_unknown=handle_unknown + ) + X_trans = ohe.fit_transform(X) + + X_expected = np.array( + [ + [0, 1, 0, 0], + [1, 0, 0, 1], + [1, 0, 1, 0], + ] + ) + assert_allclose(X_trans, X_expected) + + # Both categories are unknown + X_test = [["c", 3]] + X_expected = np.array([[0, 0, 0, 0]]) + + warn_msg = ( + r"Found unknown categories in columns \[0, 1\] during " + "transform. These unknown categories will be encoded as all " + "zeros" + ) + with pytest.warns(UserWarning, match=warn_msg): + X_trans = ohe.transform(X_test) + assert_allclose(X_trans, X_expected) + + # inverse_transform maps to None + X_inv = ohe.inverse_transform(X_expected) + assert_array_equal(X_inv, np.array([["a", None]], dtype=object)) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +def test_ohe_drop_first_explicit_categories(handle_unknown): + """Check drop='first' and handle_unknown='ignore'/'infrequent_if_exist' + during fit with categories passed in.""" + + X = [["a", 0], ["b", 2], ["b", 1]] + + ohe = OneHotEncoder( + drop="first", + sparse_output=False, + handle_unknown=handle_unknown, + categories=[["b", "a"], [1, 2]], + ) + ohe.fit(X) + + X_test = [["c", 1]] + X_expected = np.array([[0, 0]]) + + warn_msg = ( + r"Found unknown categories in columns \[0\] during transform. " + r"These unknown categories will be encoded as all zeros" + ) + with pytest.warns(UserWarning, match=warn_msg): + X_trans = ohe.transform(X_test) + assert_allclose(X_trans, X_expected) + + +def test_ohe_more_informative_error_message(): + """Raise informative error message when pandas output and sparse_output=True.""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame({"a": [1, 2, 3], "b": ["z", "b", "b"]}, columns=["a", "b"]) + + ohe = OneHotEncoder(sparse_output=True) + ohe.set_output(transform="pandas") + + msg = ( + "Pandas output does not support sparse data. 
Set " + "sparse_output=False to output pandas dataframes or disable Pandas output" + ) + with pytest.raises(ValueError, match=msg): + ohe.fit_transform(df) + + ohe.fit(df) + with pytest.raises(ValueError, match=msg): + ohe.transform(df) + + +def test_ordinal_encoder_passthrough_missing_values_float_errors_dtype(): + """Test ordinal encoder with nan passthrough fails when dtype=np.int32.""" + + X = np.array([[np.nan, 3.0, 1.0, 3.0]]).T + oe = OrdinalEncoder(dtype=np.int32) + + msg = ( + r"There are missing values in features \[0\]. For OrdinalEncoder " + f"to encode missing values with dtype: {np.int32}" + ) + with pytest.raises(ValueError, match=msg): + oe.fit(X) + + +@pytest.mark.parametrize("encoded_missing_value", [np.nan, -2]) +def test_ordinal_encoder_passthrough_missing_values_float(encoded_missing_value): + """Test ordinal encoder with nan on float dtypes.""" + + X = np.array([[np.nan, 3.0, 1.0, 3.0]], dtype=np.float64).T + oe = OrdinalEncoder(encoded_missing_value=encoded_missing_value).fit(X) + + assert len(oe.categories_) == 1 + + assert_allclose(oe.categories_[0], [1.0, 3.0, np.nan]) + + X_trans = oe.transform(X) + assert_allclose(X_trans, [[encoded_missing_value], [1.0], [0.0], [1.0]]) + + X_inverse = oe.inverse_transform(X_trans) + assert_allclose(X_inverse, X) + + +@pytest.mark.parametrize("pd_nan_type", ["pd.NA", "np.nan"]) +@pytest.mark.parametrize("encoded_missing_value", [np.nan, -2]) +def test_ordinal_encoder_missing_value_support_pandas_categorical( + pd_nan_type, encoded_missing_value +): + """Check ordinal encoder is compatible with pandas.""" + # checks pandas dataframe with categorical features + pd = pytest.importorskip("pandas") + + pd_missing_value = pd.NA if pd_nan_type == "pd.NA" else np.nan + + df = pd.DataFrame( + { + "col1": pd.Series(["c", "a", pd_missing_value, "b", "a"], dtype="category"), + } + ) + + oe = OrdinalEncoder(encoded_missing_value=encoded_missing_value).fit(df) + assert len(oe.categories_) == 1 + assert_array_equal(oe.categories_[0][:3], ["a", "b", "c"]) + assert np.isnan(oe.categories_[0][-1]) + + df_trans = oe.transform(df) + + assert_allclose(df_trans, [[2.0], [0.0], [encoded_missing_value], [1.0], [0.0]]) + + X_inverse = oe.inverse_transform(df_trans) + assert X_inverse.shape == (5, 1) + assert_array_equal(X_inverse[:2, 0], ["c", "a"]) + assert_array_equal(X_inverse[3:, 0], ["b", "a"]) + assert np.isnan(X_inverse[2, 0]) + + +@pytest.mark.parametrize( + "X, X2, cats, cat_dtype", + [ + ( + ( + np.array([["a", np.nan]], dtype=object).T, + np.array([["a", "b"]], dtype=object).T, + [np.array(["a", "d", np.nan], dtype=object)], + np.object_, + ) + ), + ( + ( + np.array([["a", np.nan]], dtype=object).T, + np.array([["a", "b"]], dtype=object).T, + [np.array(["a", "d", np.nan], dtype=object)], + np.object_, + ) + ), + ( + ( + np.array([[2.0, np.nan]], dtype=np.float64).T, + np.array([[3.0]], dtype=np.float64).T, + [np.array([2.0, 4.0, np.nan])], + np.float64, + ) + ), + ], + ids=[ + "object-None-missing-value", + "object-nan-missing_value", + "numeric-missing-value", + ], +) +def test_ordinal_encoder_specified_categories_missing_passthrough( + X, X2, cats, cat_dtype +): + """Test ordinal encoder for specified categories.""" + oe = OrdinalEncoder(categories=cats) + exp = np.array([[0.0], [np.nan]]) + assert_array_equal(oe.fit_transform(X), exp) + # manually specified categories should have same dtype as + # the data when coerced from lists + assert oe.categories_[0].dtype == cat_dtype + + # when specifying categories manually, unknown categories 
should already + # raise when fitting + oe = OrdinalEncoder(categories=cats) + with pytest.raises(ValueError, match="Found unknown categories"): + oe.fit(X2) + + +@pytest.mark.parametrize("Encoder", [OneHotEncoder, OrdinalEncoder]) +def test_encoder_duplicate_specified_categories(Encoder): + """Test encoder for specified categories have duplicate values. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27088 + """ + cats = [np.array(["a", "b", "a"], dtype=object)] + enc = Encoder(categories=cats) + X = np.array([["a", "b"]], dtype=object).T + with pytest.raises( + ValueError, match="the predefined categories contain duplicate elements." + ): + enc.fit(X) + + +@pytest.mark.parametrize( + "X, expected_X_trans, X_test", + [ + ( + np.array([[1.0, np.nan, 3.0]]).T, + np.array([[0.0, np.nan, 1.0]]).T, + np.array([[4.0]]), + ), + ( + np.array([[1.0, 4.0, 3.0]]).T, + np.array([[0.0, 2.0, 1.0]]).T, + np.array([[np.nan]]), + ), + ( + np.array([["c", np.nan, "b"]], dtype=object).T, + np.array([[1.0, np.nan, 0.0]]).T, + np.array([["d"]], dtype=object), + ), + ( + np.array([["c", "a", "b"]], dtype=object).T, + np.array([[2.0, 0.0, 1.0]]).T, + np.array([[np.nan]], dtype=object), + ), + ], +) +def test_ordinal_encoder_handle_missing_and_unknown(X, expected_X_trans, X_test): + """Test the interaction between missing values and handle_unknown""" + + oe = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1) + + X_trans = oe.fit_transform(X) + assert_allclose(X_trans, expected_X_trans) + + assert_allclose(oe.transform(X_test), [[-1.0]]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_ordinal_encoder_sparse(csr_container): + """Check that we raise proper error with sparse input in OrdinalEncoder. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19878 + """ + X = np.array([[3, 2, 1], [0, 1, 1]]) + X_sparse = csr_container(X) + + encoder = OrdinalEncoder() + + err_msg = "Sparse data was passed, but dense data is required" + with pytest.raises(TypeError, match=err_msg): + encoder.fit(X_sparse) + with pytest.raises(TypeError, match=err_msg): + encoder.fit_transform(X_sparse) + + X_trans = encoder.fit_transform(X) + X_trans_sparse = csr_container(X_trans) + with pytest.raises(TypeError, match=err_msg): + encoder.inverse_transform(X_trans_sparse) + + +def test_ordinal_encoder_fit_with_unseen_category(): + """Check OrdinalEncoder.fit works with unseen category when + `handle_unknown="use_encoded_value"`. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19872 + """ + X = np.array([0, 0, 1, 0, 2, 5])[:, np.newaxis] + oe = OrdinalEncoder( + categories=[[-1, 0, 1]], handle_unknown="use_encoded_value", unknown_value=-999 + ) + oe.fit(X) + + oe = OrdinalEncoder(categories=[[-1, 0, 1]], handle_unknown="error") + with pytest.raises(ValueError, match="Found unknown categories"): + oe.fit(X) + + +@pytest.mark.parametrize( + "X_train", + [ + [["AA", "B"]], + np.array([["AA", "B"]], dtype="O"), + np.array([["AA", "B"]], dtype="U"), + ], +) +@pytest.mark.parametrize( + "X_test", + [ + [["A", "B"]], + np.array([["A", "B"]], dtype="O"), + np.array([["A", "B"]], dtype="U"), + ], +) +def test_ordinal_encoder_handle_unknown_string_dtypes(X_train, X_test): + """Checks that `OrdinalEncoder` transforms string dtypes. 
+ Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19872 + """ + enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-9) + enc.fit(X_train) + + X_trans = enc.transform(X_test) + assert_allclose(X_trans, [[-9, 0]]) + + +def test_ordinal_encoder_python_integer(): + """Check that `OrdinalEncoder` accepts Python integers that are potentially + larger than 64 bits. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/20721 + """ + X = np.array( + [ + 44253463435747313673, + 9867966753463435747313673, + 44253462342215747313673, + 442534634357764313673, + ] + ).reshape(-1, 1) + encoder = OrdinalEncoder().fit(X) + assert_array_equal(encoder.categories_, np.sort(X, axis=0).T) + X_trans = encoder.transform(X) + assert_array_equal(X_trans, [[0], [3], [2], [1]]) + + +def test_ordinal_encoder_features_names_out_pandas(): + """Check feature names out is same as the input.""" + pd = pytest.importorskip("pandas") + + names = ["b", "c", "a"] + X = pd.DataFrame([[1, 2, 3]], columns=names) + enc = OrdinalEncoder().fit(X) + + feature_names_out = enc.get_feature_names_out() + assert_array_equal(names, feature_names_out) + + +def test_ordinal_encoder_unknown_missing_interaction(): + """Check interactions between encode_unknown and missing value encoding.""" + + X = np.array([["a"], ["b"], [np.nan]], dtype=object) + + oe = OrdinalEncoder( + handle_unknown="use_encoded_value", + unknown_value=np.nan, + encoded_missing_value=-3, + ).fit(X) + + X_trans = oe.transform(X) + assert_allclose(X_trans, [[0], [1], [-3]]) + + # "c" is unknown and is mapped to np.nan + # "None" is a missing value and is set to -3 + X_test = np.array([["c"], [np.nan]], dtype=object) + X_test_trans = oe.transform(X_test) + assert_allclose(X_test_trans, [[np.nan], [-3]]) + + # Non-regression test for #24082 + X_roundtrip = oe.inverse_transform(X_test_trans) + + # np.nan is unknown so it maps to None + assert X_roundtrip[0][0] is None + + # -3 is the encoded missing value so it maps back to nan + assert np.isnan(X_roundtrip[1][0]) + + +@pytest.mark.parametrize("with_pandas", [True, False]) +def test_ordinal_encoder_encoded_missing_value_error(with_pandas): + """Check OrdinalEncoder errors when encoded_missing_value is used by + an known category.""" + X = np.array([["a", "dog"], ["b", "cat"], ["c", np.nan]], dtype=object) + + # The 0-th feature has no missing values so it is not included in the list of + # features + error_msg = ( + r"encoded_missing_value \(1\) is already used to encode a known category " + r"in features: " + ) + + if with_pandas: + pd = pytest.importorskip("pandas") + X = pd.DataFrame(X, columns=["letter", "pet"]) + error_msg = error_msg + r"\['pet'\]" + else: + error_msg = error_msg + r"\[1\]" + + oe = OrdinalEncoder(encoded_missing_value=1) + + with pytest.raises(ValueError, match=error_msg): + oe.fit(X) + + +@pytest.mark.parametrize( + "X_train, X_test_trans_expected, X_roundtrip_expected", + [ + ( + # missing value is not in training set + # inverse transform will considering encoded nan as unknown + np.array([["a"], ["1"]], dtype=object), + [[0], [np.nan], [np.nan]], + np.asarray([["1"], [None], [None]], dtype=object), + ), + ( + # missing value in training set, + # inverse transform will considering encoded nan as missing + np.array([[np.nan], ["1"], ["a"]], dtype=object), + [[0], [np.nan], [np.nan]], + np.asarray([["1"], [np.nan], [np.nan]], dtype=object), + ), + ], +) +def test_ordinal_encoder_unknown_missing_interaction_both_nan( + X_train, 
X_test_trans_expected, X_roundtrip_expected +): + """Check transform when unknown_value and encoded_missing_value is nan. + + Non-regression test for #24082. + """ + oe = OrdinalEncoder( + handle_unknown="use_encoded_value", + unknown_value=np.nan, + encoded_missing_value=np.nan, + ).fit(X_train) + + X_test = np.array([["1"], [np.nan], ["b"]]) + X_test_trans = oe.transform(X_test) + + # both nan and unknown are encoded as nan + assert_allclose(X_test_trans, X_test_trans_expected) + X_roundtrip = oe.inverse_transform(X_test_trans) + + n_samples = X_roundtrip_expected.shape[0] + for i in range(n_samples): + expected_val = X_roundtrip_expected[i, 0] + val = X_roundtrip[i, 0] + + if expected_val is None: + assert val is None + elif is_scalar_nan(expected_val): + assert np.isnan(val) + else: + assert val == expected_val + + +def test_one_hot_encoder_set_output(): + """Check OneHotEncoder works with set_output.""" + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"A": ["a", "b"], "B": [1, 2]}) + ohe = OneHotEncoder() + + ohe.set_output(transform="pandas") + + match = "Pandas output does not support sparse data. Set sparse_output=False" + with pytest.raises(ValueError, match=match): + ohe.fit_transform(X_df) + + ohe_default = OneHotEncoder(sparse_output=False).set_output(transform="default") + ohe_pandas = OneHotEncoder(sparse_output=False).set_output(transform="pandas") + + X_default = ohe_default.fit_transform(X_df) + X_pandas = ohe_pandas.fit_transform(X_df) + + assert_allclose(X_pandas.to_numpy(), X_default) + assert_array_equal(ohe_pandas.get_feature_names_out(), X_pandas.columns) + + +def test_ordinal_set_output(): + """Check OrdinalEncoder works with set_output.""" + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"A": ["a", "b"], "B": [1, 2]}) + + ord_default = OrdinalEncoder().set_output(transform="default") + ord_pandas = OrdinalEncoder().set_output(transform="pandas") + + X_default = ord_default.fit_transform(X_df) + X_pandas = ord_pandas.fit_transform(X_df) + + assert_allclose(X_pandas.to_numpy(), X_default) + assert_array_equal(ord_pandas.get_feature_names_out(), X_pandas.columns) + + +def test_predefined_categories_dtype(): + """Check that the categories_ dtype is `object` for string categories + + Regression test for gh-25171. + """ + categories = [["as", "mmas", "eas", "ras", "acs"], ["1", "2"]] + + enc = OneHotEncoder(categories=categories) + + enc.fit([["as", "1"]]) + + assert len(categories) == len(enc.categories_) + for n, cat in enumerate(enc.categories_): + assert cat.dtype == object + assert_array_equal(categories[n], cat) + + +def test_ordinal_encoder_missing_unknown_encoding_max(): + """Check missing value or unknown encoding can equal the cardinality.""" + X = np.array([["dog"], ["cat"], [np.nan]], dtype=object) + X_trans = OrdinalEncoder(encoded_missing_value=2).fit_transform(X) + assert_allclose(X_trans, [[1], [0], [2]]) + + enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=2).fit(X) + X_test = np.array([["snake"]]) + X_trans = enc.transform(X_test) + assert_allclose(X_trans, [[2]]) + + +def test_drop_idx_infrequent_categories(): + """Check drop_idx is defined correctly with infrequent categories. + + Non-regression test for gh-25550. 
+ """ + X = np.array( + [["a"] * 2 + ["b"] * 4 + ["c"] * 4 + ["d"] * 4 + ["e"] * 4], dtype=object + ).T + ohe = OneHotEncoder(min_frequency=4, sparse_output=False, drop="first").fit(X) + assert_array_equal( + ohe.get_feature_names_out(), ["x0_c", "x0_d", "x0_e", "x0_infrequent_sklearn"] + ) + assert ohe.categories_[0][ohe.drop_idx_[0]] == "b" + + X = np.array([["a"] * 2 + ["b"] * 2 + ["c"] * 10], dtype=object).T + ohe = OneHotEncoder(min_frequency=4, sparse_output=False, drop="if_binary").fit(X) + assert_array_equal(ohe.get_feature_names_out(), ["x0_infrequent_sklearn"]) + assert ohe.categories_[0][ohe.drop_idx_[0]] == "c" + + X = np.array( + [["a"] * 2 + ["b"] * 4 + ["c"] * 4 + ["d"] * 4 + ["e"] * 4], dtype=object + ).T + ohe = OneHotEncoder(min_frequency=4, sparse_output=False, drop=["d"]).fit(X) + assert_array_equal( + ohe.get_feature_names_out(), ["x0_b", "x0_c", "x0_e", "x0_infrequent_sklearn"] + ) + assert ohe.categories_[0][ohe.drop_idx_[0]] == "d" + + ohe = OneHotEncoder(min_frequency=4, sparse_output=False, drop=None).fit(X) + assert_array_equal( + ohe.get_feature_names_out(), + ["x0_b", "x0_c", "x0_d", "x0_e", "x0_infrequent_sklearn"], + ) + assert ohe.drop_idx_ is None + + +@pytest.mark.parametrize( + "kwargs", + [ + {"max_categories": 3}, + {"min_frequency": 6}, + {"min_frequency": 9}, + {"min_frequency": 0.24}, + {"min_frequency": 0.16}, + {"max_categories": 3, "min_frequency": 8}, + {"max_categories": 4, "min_frequency": 6}, + ], +) +def test_ordinal_encoder_infrequent_three_levels(kwargs): + """Test parameters for grouping 'a', and 'd' into the infrequent category.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ordinal = OrdinalEncoder( + handle_unknown="use_encoded_value", unknown_value=-1, **kwargs + ).fit(X_train) + assert_array_equal(ordinal.categories_, [["a", "b", "c", "d"]]) + assert_array_equal(ordinal.infrequent_categories_, [["a", "d"]]) + + X_test = [["a"], ["b"], ["c"], ["d"], ["z"]] + expected_trans = [[2], [0], [1], [2], [-1]] + + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, expected_trans) + + X_inverse = ordinal.inverse_transform(X_trans) + expected_inverse = [ + ["infrequent_sklearn"], + ["b"], + ["c"], + ["infrequent_sklearn"], + [None], + ] + assert_array_equal(X_inverse, expected_inverse) + + +def test_ordinal_encoder_infrequent_three_levels_user_cats(): + """Test that the order of the categories provided by a user is respected. + + In this case 'c' is encoded as the first category and 'b' is encoded + as the second one. 
+ """ + + X_train = np.array( + [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object + ).T + ordinal = OrdinalEncoder( + categories=[["c", "d", "b", "a"]], + max_categories=3, + handle_unknown="use_encoded_value", + unknown_value=-1, + ).fit(X_train) + assert_array_equal(ordinal.categories_, [["c", "d", "b", "a"]]) + assert_array_equal(ordinal.infrequent_categories_, [["d", "a"]]) + + X_test = [["a"], ["b"], ["c"], ["d"], ["z"]] + expected_trans = [[2], [1], [0], [2], [-1]] + + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, expected_trans) + + X_inverse = ordinal.inverse_transform(X_trans) + expected_inverse = [ + ["infrequent_sklearn"], + ["b"], + ["c"], + ["infrequent_sklearn"], + [None], + ] + assert_array_equal(X_inverse, expected_inverse) + + +def test_ordinal_encoder_infrequent_mixed(): + """Test when feature 0 has infrequent categories and feature 1 does not.""" + + X = np.column_stack(([0, 1, 3, 3, 3, 3, 2, 0, 3], [0, 0, 0, 0, 1, 1, 1, 1, 1])) + + ordinal = OrdinalEncoder(max_categories=3).fit(X) + + assert_array_equal(ordinal.infrequent_categories_[0], [1, 2]) + assert ordinal.infrequent_categories_[1] is None + + X_test = [[3, 0], [1, 1]] + expected_trans = [[1, 0], [2, 1]] + + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, expected_trans) + + X_inverse = ordinal.inverse_transform(X_trans) + expected_inverse = np.array([[3, 0], ["infrequent_sklearn", 1]], dtype=object) + assert_array_equal(X_inverse, expected_inverse) + + +def test_ordinal_encoder_infrequent_multiple_categories_dtypes(): + """Test infrequent categories with a pandas DataFrame with multiple dtypes.""" + + pd = pytest.importorskip("pandas") + categorical_dtype = pd.CategoricalDtype(["bird", "cat", "dog", "snake"]) + X = pd.DataFrame( + { + "str": ["a", "f", "c", "f", "f", "a", "c", "b", "b"], + "int": [5, 3, 0, 10, 10, 12, 0, 3, 5], + "categorical": pd.Series( + ["dog"] * 4 + ["cat"] * 3 + ["snake"] + ["bird"], + dtype=categorical_dtype, + ), + }, + columns=["str", "int", "categorical"], + ) + + ordinal = OrdinalEncoder(max_categories=3).fit(X) + # X[:, 0] 'a', 'b', 'c' have the same frequency. 'a' and 'b' will be + # considered infrequent because they appear first when sorted + + # X[:, 1] 0, 3, 5, 10 has frequency 2 and 12 has frequency 1. + # 0, 3, 12 will be considered infrequent because they appear first when + # sorted. 
+ + # X[:, 2] "snake" and "bird" or infrequent + + assert_array_equal(ordinal.infrequent_categories_[0], ["a", "b"]) + assert_array_equal(ordinal.infrequent_categories_[1], [0, 3, 12]) + assert_array_equal(ordinal.infrequent_categories_[2], ["bird", "snake"]) + + X_test = pd.DataFrame( + { + "str": ["a", "b", "f", "c"], + "int": [12, 0, 10, 5], + "categorical": pd.Series( + ["cat"] + ["snake"] + ["bird"] + ["dog"], + dtype=categorical_dtype, + ), + }, + columns=["str", "int", "categorical"], + ) + expected_trans = [[2, 2, 0], [2, 2, 2], [1, 1, 2], [0, 0, 1]] + + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, expected_trans) + + +def test_ordinal_encoder_infrequent_custom_mapping(): + """Check behavior of unknown_value and encoded_missing_value with infrequent.""" + X_train = np.array( + [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3 + [np.nan]], dtype=object + ).T + + ordinal = OrdinalEncoder( + handle_unknown="use_encoded_value", + unknown_value=2, + max_categories=2, + encoded_missing_value=3, + ).fit(X_train) + assert_array_equal(ordinal.infrequent_categories_, [["a", "c", "d"]]) + + X_test = np.array([["a"], ["b"], ["c"], ["d"], ["e"], [np.nan]], dtype=object) + expected_trans = [[1], [0], [1], [1], [2], [3]] + + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, expected_trans) + + +@pytest.mark.parametrize( + "kwargs", + [ + {"max_categories": 6}, + {"min_frequency": 2}, + ], +) +def test_ordinal_encoder_all_frequent(kwargs): + """All categories are considered frequent have same encoding as default encoder.""" + X_train = np.array( + [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object + ).T + + adjusted_encoder = OrdinalEncoder( + **kwargs, handle_unknown="use_encoded_value", unknown_value=-1 + ).fit(X_train) + default_encoder = OrdinalEncoder( + handle_unknown="use_encoded_value", unknown_value=-1 + ).fit(X_train) + + X_test = [["a"], ["b"], ["c"], ["d"], ["e"]] + + assert_allclose( + adjusted_encoder.transform(X_test), default_encoder.transform(X_test) + ) + + +@pytest.mark.parametrize( + "kwargs", + [ + {"max_categories": 1}, + {"min_frequency": 100}, + ], +) +def test_ordinal_encoder_all_infrequent(kwargs): + """When all categories are infrequent, they are all encoded as zero.""" + X_train = np.array( + [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object + ).T + encoder = OrdinalEncoder( + **kwargs, handle_unknown="use_encoded_value", unknown_value=-1 + ).fit(X_train) + + X_test = [["a"], ["b"], ["c"], ["d"], ["e"]] + assert_allclose(encoder.transform(X_test), [[0], [0], [0], [0], [-1]]) + + +def test_ordinal_encoder_missing_appears_frequent(): + """Check behavior when missing value appears frequently.""" + X = np.array( + [[np.nan] * 20 + ["dog"] * 10 + ["cat"] * 5 + ["snake"] + ["deer"]], + dtype=object, + ).T + ordinal = OrdinalEncoder(max_categories=3).fit(X) + + X_test = np.array([["snake", "cat", "dog", np.nan]], dtype=object).T + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, [[2], [0], [1], [np.nan]]) + + +def test_ordinal_encoder_missing_appears_infrequent(): + """Check behavior when missing value appears infrequently.""" + + # feature 0 has infrequent categories + # feature 1 has no infrequent categories + X = np.array( + [ + [np.nan] + ["dog"] * 10 + ["cat"] * 5 + ["snake"] + ["deer"], + ["red"] * 9 + ["green"] * 9, + ], + dtype=object, + ).T + ordinal = OrdinalEncoder(min_frequency=4).fit(X) + + X_test = np.array( + [ + ["snake", "red"], + ["deer", "green"], + [np.nan, "green"], + ["dog", "green"], 
+ ["cat", "red"], + ], + dtype=object, + ) + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, [[2, 1], [2, 0], [np.nan, 0], [1, 0], [0, 1]]) + + +@pytest.mark.parametrize("Encoder", [OneHotEncoder, OrdinalEncoder]) +def test_encoder_not_fitted(Encoder): + """Check that we raise a `NotFittedError` by calling transform before fit with + the encoders. + + One could expect that the passing the `categories` argument to the encoder + would make it stateless. However, `fit` is making a couple of check, such as the + position of `np.nan`. + """ + X = np.array([["A"], ["B"], ["C"]], dtype=object) + encoder = Encoder(categories=[["A", "B", "C"]]) + with pytest.raises(NotFittedError): + encoder.transform(X) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_function_transformer.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_function_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..e7b86e88d1547cb296d89687f2179ab850349dd5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_function_transformer.py @@ -0,0 +1,591 @@ +import warnings + +import numpy as np +import pytest + +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import FunctionTransformer, StandardScaler +from sklearn.preprocessing._function_transformer import _get_adapter_from_container +from sklearn.utils._testing import ( + _convert_container, + assert_allclose_dense_sparse, + assert_array_equal, +) +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + + +def test_get_adapter_from_container(): + """Check the behavior fo `_get_adapter_from_container`.""" + pd = pytest.importorskip("pandas") + X = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]}) + adapter = _get_adapter_from_container(X) + assert adapter.container_lib == "pandas" + err_msg = "The container does not have a registered adapter in scikit-learn." + with pytest.raises(ValueError, match=err_msg): + _get_adapter_from_container(X.to_numpy()) + + +def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X): + def _func(X, *args, **kwargs): + args_store.append(X) + args_store.extend(args) + kwargs_store.update(kwargs) + return func(X) + + return _func + + +def test_delegate_to_func(): + # (args|kwargs)_store will hold the positional and keyword arguments + # passed to the function inside the FunctionTransformer. + args_store = [] + kwargs_store = {} + X = np.arange(10).reshape((5, 2)) + assert_array_equal( + FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X), + X, + "transform should have returned X unchanged", + ) + + # The function should only have received X. + assert args_store == [ + X + ], "Incorrect positional arguments passed to func: {args}".format(args=args_store) + + assert ( + not kwargs_store + ), "Unexpected keyword arguments passed to func: {args}".format(args=kwargs_store) + + # reset the argument stores. 
+ args_store[:] = [] + kwargs_store.clear() + transformed = FunctionTransformer( + _make_func(args_store, kwargs_store), + ).transform(X) + + assert_array_equal( + transformed, X, err_msg="transform should have returned X unchanged" + ) + + # The function should have received X + assert args_store == [ + X + ], "Incorrect positional arguments passed to func: {args}".format(args=args_store) + + assert ( + not kwargs_store + ), "Unexpected keyword arguments passed to func: {args}".format(args=kwargs_store) + + +def test_np_log(): + X = np.arange(10).reshape((5, 2)) + + # Test that the numpy.log example still works. + assert_array_equal( + FunctionTransformer(np.log1p).transform(X), + np.log1p(X), + ) + + +def test_kw_arg(): + X = np.linspace(0, 1, num=10).reshape((5, 2)) + + F = FunctionTransformer(np.around, kw_args=dict(decimals=3)) + + # Test that rounding is correct + assert_array_equal(F.transform(X), np.around(X, decimals=3)) + + +def test_kw_arg_update(): + X = np.linspace(0, 1, num=10).reshape((5, 2)) + + F = FunctionTransformer(np.around, kw_args=dict(decimals=3)) + + F.kw_args["decimals"] = 1 + + # Test that rounding is correct + assert_array_equal(F.transform(X), np.around(X, decimals=1)) + + +def test_kw_arg_reset(): + X = np.linspace(0, 1, num=10).reshape((5, 2)) + + F = FunctionTransformer(np.around, kw_args=dict(decimals=3)) + + F.kw_args = dict(decimals=1) + + # Test that rounding is correct + assert_array_equal(F.transform(X), np.around(X, decimals=1)) + + +def test_inverse_transform(): + X = np.array([1, 4, 9, 16]).reshape((2, 2)) + + # Test that inverse_transform works correctly + F = FunctionTransformer( + func=np.sqrt, + inverse_func=np.around, + inv_kw_args=dict(decimals=3), + ) + assert_array_equal( + F.inverse_transform(F.transform(X)), + np.around(np.sqrt(X), decimals=3), + ) + + +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS) +def test_check_inverse(sparse_container): + X = np.array([1, 4, 9, 16], dtype=np.float64).reshape((2, 2)) + if sparse_container is not None: + X = sparse_container(X) + + trans = FunctionTransformer( + func=np.sqrt, + inverse_func=np.around, + accept_sparse=sparse_container is not None, + check_inverse=True, + validate=True, + ) + warning_message = ( + "The provided functions are not strictly" + " inverse of each other. If you are sure you" + " want to proceed regardless, set" + " 'check_inverse=False'." + ) + with pytest.warns(UserWarning, match=warning_message): + trans.fit(X) + + trans = FunctionTransformer( + func=np.expm1, + inverse_func=np.log1p, + accept_sparse=sparse_container is not None, + check_inverse=True, + validate=True, + ) + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + Xt = trans.fit_transform(X) + + assert_allclose_dense_sparse(X, trans.inverse_transform(Xt)) + + +def test_check_inverse_func_or_inverse_not_provided(): + # check that we don't check inverse when one of the func or inverse is not + # provided. 
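+ # With check_inverse=True but only one of func/inverse_func provided, fit
+ # must not emit the 'not strictly inverse of each other' UserWarning; the
+ # simplefilter("error", UserWarning) blocks below turn any such warning
+ # into a test failure.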
+ X = np.array([1, 4, 9, 16], dtype=np.float64).reshape((2, 2)) + + trans = FunctionTransformer( + func=np.expm1, inverse_func=None, check_inverse=True, validate=True + ) + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + trans.fit(X) + trans = FunctionTransformer( + func=None, inverse_func=np.expm1, check_inverse=True, validate=True + ) + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + trans.fit(X) + + +def test_function_transformer_frame(): + pd = pytest.importorskip("pandas") + X_df = pd.DataFrame(np.random.randn(100, 10)) + transformer = FunctionTransformer() + X_df_trans = transformer.fit_transform(X_df) + assert hasattr(X_df_trans, "loc") + + +@pytest.mark.parametrize("X_type", ["array", "series"]) +def test_function_transformer_raise_error_with_mixed_dtype(X_type): + """Check that `FunctionTransformer.check_inverse` raises error on mixed dtype.""" + mapping = {"one": 1, "two": 2, "three": 3, 5: "five", 6: "six"} + inverse_mapping = {value: key for key, value in mapping.items()} + dtype = "object" + + data = ["one", "two", "three", "one", "one", 5, 6] + data = _convert_container(data, X_type, columns_name=["value"], dtype=dtype) + + def func(X): + return np.array([mapping[X[i]] for i in range(X.size)], dtype=object) + + def inverse_func(X): + return _convert_container( + [inverse_mapping[x] for x in X], + X_type, + columns_name=["value"], + dtype=dtype, + ) + + transformer = FunctionTransformer( + func=func, inverse_func=inverse_func, validate=False, check_inverse=True + ) + + msg = "'check_inverse' is only supported when all the elements in `X` is numerical." + with pytest.raises(ValueError, match=msg): + transformer.fit(data) + + +def test_function_transformer_support_all_nummerical_dataframes_check_inverse_True(): + """Check support for dataframes with only numerical values.""" + pd = pytest.importorskip("pandas") + + df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + transformer = FunctionTransformer( + func=lambda x: x + 2, inverse_func=lambda x: x - 2, check_inverse=True + ) + + # Does not raise an error + df_out = transformer.fit_transform(df) + assert_allclose_dense_sparse(df_out, df + 2) + + +def test_function_transformer_with_dataframe_and_check_inverse_True(): + """Check error is raised when check_inverse=True. + + Non-regresion test for gh-25261. + """ + pd = pytest.importorskip("pandas") + transformer = FunctionTransformer( + func=lambda x: x, inverse_func=lambda x: x, check_inverse=True + ) + + df_mixed = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}) + msg = "'check_inverse' is only supported when all the elements in `X` is numerical." 
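+ # The container mixes strings and integers (object dtype), so the numerical
+ # round-trip performed by check_inverse is impossible and fit must raise.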
+ with pytest.raises(ValueError, match=msg): + transformer.fit(df_mixed) + + +@pytest.mark.parametrize( + "X, feature_names_out, input_features, expected", + [ + ( + # NumPy inputs, default behavior: generate names + np.random.rand(100, 3), + "one-to-one", + None, + ("x0", "x1", "x2"), + ), + ( + # Pandas input, default behavior: use input feature names + {"a": np.random.rand(100), "b": np.random.rand(100)}, + "one-to-one", + None, + ("a", "b"), + ), + ( + # NumPy input, feature_names_out=callable + np.random.rand(100, 3), + lambda transformer, input_features: ("a", "b"), + None, + ("a", "b"), + ), + ( + # Pandas input, feature_names_out=callable + {"a": np.random.rand(100), "b": np.random.rand(100)}, + lambda transformer, input_features: ("c", "d", "e"), + None, + ("c", "d", "e"), + ), + ( + # NumPy input, feature_names_out=callable – default input_features + np.random.rand(100, 3), + lambda transformer, input_features: tuple(input_features) + ("a",), + None, + ("x0", "x1", "x2", "a"), + ), + ( + # Pandas input, feature_names_out=callable – default input_features + {"a": np.random.rand(100), "b": np.random.rand(100)}, + lambda transformer, input_features: tuple(input_features) + ("c",), + None, + ("a", "b", "c"), + ), + ( + # NumPy input, input_features=list of names + np.random.rand(100, 3), + "one-to-one", + ("a", "b", "c"), + ("a", "b", "c"), + ), + ( + # Pandas input, input_features=list of names + {"a": np.random.rand(100), "b": np.random.rand(100)}, + "one-to-one", + ("a", "b"), # must match feature_names_in_ + ("a", "b"), + ), + ( + # NumPy input, feature_names_out=callable, input_features=list + np.random.rand(100, 3), + lambda transformer, input_features: tuple(input_features) + ("d",), + ("a", "b", "c"), + ("a", "b", "c", "d"), + ), + ( + # Pandas input, feature_names_out=callable, input_features=list + {"a": np.random.rand(100), "b": np.random.rand(100)}, + lambda transformer, input_features: tuple(input_features) + ("c",), + ("a", "b"), # must match feature_names_in_ + ("a", "b", "c"), + ), + ], +) +@pytest.mark.parametrize("validate", [True, False]) +def test_function_transformer_get_feature_names_out( + X, feature_names_out, input_features, expected, validate +): + if isinstance(X, dict): + pd = pytest.importorskip("pandas") + X = pd.DataFrame(X) + + transformer = FunctionTransformer( + feature_names_out=feature_names_out, validate=validate + ) + transformer.fit(X) + names = transformer.get_feature_names_out(input_features) + assert isinstance(names, np.ndarray) + assert names.dtype == object + assert_array_equal(names, expected) + + +def test_function_transformer_get_feature_names_out_without_validation(): + transformer = FunctionTransformer(feature_names_out="one-to-one", validate=False) + X = np.random.rand(100, 2) + transformer.fit_transform(X) + + names = transformer.get_feature_names_out(("a", "b")) + assert isinstance(names, np.ndarray) + assert names.dtype == object + assert_array_equal(names, ("a", "b")) + + +def test_function_transformer_feature_names_out_is_None(): + transformer = FunctionTransformer() + X = np.random.rand(100, 2) + transformer.fit_transform(X) + + msg = "This 'FunctionTransformer' has no attribute 'get_feature_names_out'" + with pytest.raises(AttributeError, match=msg): + transformer.get_feature_names_out() + + +def test_function_transformer_feature_names_out_uses_estimator(): + def add_n_random_features(X, n): + return np.concatenate([X, np.random.rand(len(X), n)], axis=1) + + def feature_names_out(transformer, input_features): + n = 
transformer.kw_args["n"] + return list(input_features) + [f"rnd{i}" for i in range(n)] + + transformer = FunctionTransformer( + func=add_n_random_features, + feature_names_out=feature_names_out, + kw_args=dict(n=3), + validate=True, + ) + pd = pytest.importorskip("pandas") + df = pd.DataFrame({"a": np.random.rand(100), "b": np.random.rand(100)}) + transformer.fit_transform(df) + names = transformer.get_feature_names_out() + + assert isinstance(names, np.ndarray) + assert names.dtype == object + assert_array_equal(names, ("a", "b", "rnd0", "rnd1", "rnd2")) + + +def test_function_transformer_validate_inverse(): + """Test that function transformer does not reset estimator in + `inverse_transform`.""" + + def add_constant_feature(X): + X_one = np.ones((X.shape[0], 1)) + return np.concatenate((X, X_one), axis=1) + + def inverse_add_constant(X): + return X[:, :-1] + + X = np.array([[1, 2], [3, 4], [3, 4]]) + trans = FunctionTransformer( + func=add_constant_feature, + inverse_func=inverse_add_constant, + validate=True, + ) + X_trans = trans.fit_transform(X) + assert trans.n_features_in_ == X.shape[1] + + trans.inverse_transform(X_trans) + assert trans.n_features_in_ == X.shape[1] + + +@pytest.mark.parametrize( + "feature_names_out, expected", + [ + ("one-to-one", ["pet", "color"]), + [lambda est, names: [f"{n}_out" for n in names], ["pet_out", "color_out"]], + ], +) +@pytest.mark.parametrize("in_pipeline", [True, False]) +def test_get_feature_names_out_dataframe_with_string_data( + feature_names_out, expected, in_pipeline +): + """Check that get_feature_names_out works with DataFrames with string data.""" + pd = pytest.importorskip("pandas") + X = pd.DataFrame({"pet": ["dog", "cat"], "color": ["red", "green"]}) + + def func(X): + if feature_names_out == "one-to-one": + return X + else: + name = feature_names_out(None, X.columns) + return X.rename(columns=dict(zip(X.columns, name))) + + transformer = FunctionTransformer(func=func, feature_names_out=feature_names_out) + if in_pipeline: + transformer = make_pipeline(transformer) + + X_trans = transformer.fit_transform(X) + assert isinstance(X_trans, pd.DataFrame) + + names = transformer.get_feature_names_out() + assert isinstance(names, np.ndarray) + assert names.dtype == object + assert_array_equal(names, expected) + + +def test_set_output_func(): + """Check behavior of set_output with different settings.""" + pd = pytest.importorskip("pandas") + + X = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]}) + + ft = FunctionTransformer(np.log, feature_names_out="one-to-one") + + # no warning is raised when feature_names_out is defined + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + ft.set_output(transform="pandas") + + X_trans = ft.fit_transform(X) + assert isinstance(X_trans, pd.DataFrame) + assert_array_equal(X_trans.columns, ["a", "b"]) + + ft = FunctionTransformer(lambda x: 2 * x) + ft.set_output(transform="pandas") + + # no warning is raised when func returns a panda dataframe + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + X_trans = ft.fit_transform(X) + assert isinstance(X_trans, pd.DataFrame) + assert_array_equal(X_trans.columns, ["a", "b"]) + + # Warning is raised when func returns a ndarray + ft_np = FunctionTransformer(lambda x: np.asarray(x)) + + for transform in ("pandas", "polars"): + ft_np.set_output(transform=transform) + msg = ( + f"When `set_output` is configured to be '{transform}'.*{transform} " + "DataFrame.*" + ) + with pytest.warns(UserWarning, match=msg): + 
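+ # func returns a plain ndarray instead of the configured pandas/polars
+ # DataFrame, so fit_transform must emit the UserWarning matched above.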
ft_np.fit_transform(X) + + # default transform does not warn + ft_np.set_output(transform="default") + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + ft_np.fit_transform(X) + + +def test_consistence_column_name_between_steps(): + """Check that we have a consistence between the feature names out of + `FunctionTransformer` and the feature names in of the next step in the pipeline. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27695 + """ + pd = pytest.importorskip("pandas") + + def with_suffix(_, names): + return [name + "__log" for name in names] + + pipeline = make_pipeline( + FunctionTransformer(np.log1p, feature_names_out=with_suffix), StandardScaler() + ) + + df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=["a", "b"]) + X_trans = pipeline.fit_transform(df) + assert pipeline.get_feature_names_out().tolist() == ["a__log", "b__log"] + # StandardScaler will convert to a numpy array + assert isinstance(X_trans, np.ndarray) + + +@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"]) +@pytest.mark.parametrize("transform_output", ["default", "pandas", "polars"]) +def test_function_transformer_overwrite_column_names(dataframe_lib, transform_output): + """Check that we overwrite the column names when we should.""" + lib = pytest.importorskip(dataframe_lib) + if transform_output != "numpy": + pytest.importorskip(transform_output) + + df = lib.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]}) + + def with_suffix(_, names): + return [name + "__log" for name in names] + + transformer = FunctionTransformer(feature_names_out=with_suffix).set_output( + transform=transform_output + ) + X_trans = transformer.fit_transform(df) + assert_array_equal(np.asarray(X_trans), np.asarray(df)) + + feature_names = transformer.get_feature_names_out() + assert list(X_trans.columns) == with_suffix(None, df.columns) + assert feature_names.tolist() == with_suffix(None, df.columns) + + +@pytest.mark.parametrize( + "feature_names_out", + ["one-to-one", lambda _, names: [f"{name}_log" for name in names]], +) +def test_function_transformer_overwrite_column_names_numerical(feature_names_out): + """Check the same as `test_function_transformer_overwrite_column_names` + but for the specific case of pandas where column names can be numerical.""" + pd = pytest.importorskip("pandas") + + df = pd.DataFrame({0: [1, 2, 3], 1: [10, 20, 100]}) + + transformer = FunctionTransformer(feature_names_out=feature_names_out) + X_trans = transformer.fit_transform(df) + assert_array_equal(np.asarray(X_trans), np.asarray(df)) + + feature_names = transformer.get_feature_names_out() + assert list(X_trans.columns) == list(feature_names) + + +@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"]) +@pytest.mark.parametrize( + "feature_names_out", + ["one-to-one", lambda _, names: [f"{name}_log" for name in names]], +) +def test_function_transformer_error_column_inconsistent( + dataframe_lib, feature_names_out +): + """Check that we raise an error when `func` returns a dataframe with new + column names that become inconsistent with `get_feature_names_out`.""" + lib = pytest.importorskip(dataframe_lib) + + df = lib.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]}) + + def func(df): + if dataframe_lib == "pandas": + return df.rename(columns={"a": "c"}) + else: + return df.rename({"a": "c"}) + + transformer = FunctionTransformer(func=func, feature_names_out=feature_names_out) + err_msg = "The output generated by `func` have different column names" + with 
pytest.raises(ValueError, match=err_msg): + transformer.fit_transform(df).columns diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_label.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_label.py new file mode 100644 index 0000000000000000000000000000000000000000..cce0ddc5c267eb77ef85b64e5257080d75d09449 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_label.py @@ -0,0 +1,699 @@ +import numpy as np +import pytest +from scipy.sparse import issparse + +from sklearn import datasets +from sklearn.preprocessing._label import ( + LabelBinarizer, + LabelEncoder, + MultiLabelBinarizer, + _inverse_binarize_multiclass, + _inverse_binarize_thresholding, + label_binarize, +) +from sklearn.utils import _to_object_array +from sklearn.utils._testing import assert_array_equal, ignore_warnings +from sklearn.utils.fixes import ( + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + DOK_CONTAINERS, + LIL_CONTAINERS, +) +from sklearn.utils.multiclass import type_of_target + +iris = datasets.load_iris() + + +def toarray(a): + if hasattr(a, "toarray"): + a = a.toarray() + return a + + +def test_label_binarizer(): + # one-class case defaults to negative label + # For dense case: + inp = ["pos", "pos", "pos", "pos"] + lb = LabelBinarizer(sparse_output=False) + expected = np.array([[0, 0, 0, 0]]).T + got = lb.fit_transform(inp) + assert_array_equal(lb.classes_, ["pos"]) + assert_array_equal(expected, got) + assert_array_equal(lb.inverse_transform(got), inp) + + # For sparse case: + lb = LabelBinarizer(sparse_output=True) + got = lb.fit_transform(inp) + assert issparse(got) + assert_array_equal(lb.classes_, ["pos"]) + assert_array_equal(expected, got.toarray()) + assert_array_equal(lb.inverse_transform(got.toarray()), inp) + + lb = LabelBinarizer(sparse_output=False) + # two-class case + inp = ["neg", "pos", "pos", "neg"] + expected = np.array([[0, 1, 1, 0]]).T + got = lb.fit_transform(inp) + assert_array_equal(lb.classes_, ["neg", "pos"]) + assert_array_equal(expected, got) + + to_invert = np.array([[1, 0], [0, 1], [0, 1], [1, 0]]) + assert_array_equal(lb.inverse_transform(to_invert), inp) + + # multi-class case + inp = ["spam", "ham", "eggs", "ham", "0"] + expected = np.array( + [[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]] + ) + got = lb.fit_transform(inp) + assert_array_equal(lb.classes_, ["0", "eggs", "ham", "spam"]) + assert_array_equal(expected, got) + assert_array_equal(lb.inverse_transform(got), inp) + + +def test_label_binarizer_unseen_labels(): + lb = LabelBinarizer() + + expected = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + got = lb.fit_transform(["b", "d", "e"]) + assert_array_equal(expected, got) + + expected = np.array( + [[0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]] + ) + got = lb.transform(["a", "b", "c", "d", "e", "f"]) + assert_array_equal(expected, got) + + +def test_label_binarizer_set_label_encoding(): + lb = LabelBinarizer(neg_label=-2, pos_label=0) + + # two-class case with pos_label=0 + inp = np.array([0, 1, 1, 0]) + expected = np.array([[-2, 0, 0, -2]]).T + got = lb.fit_transform(inp) + assert_array_equal(expected, got) + assert_array_equal(lb.inverse_transform(got), inp) + + lb = LabelBinarizer(neg_label=-2, pos_label=2) + + # multi-class case + inp = np.array([3, 2, 1, 2, 0]) + expected = np.array( + [ + [-2, -2, -2, +2], + [-2, -2, +2, -2], + [-2, +2, -2, -2], + [-2, -2, +2, -2], + [+2, -2, -2, -2], + ] + ) + got = 
lb.fit_transform(inp) + assert_array_equal(expected, got) + assert_array_equal(lb.inverse_transform(got), inp) + + +@pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"]) +@pytest.mark.parametrize("unique_first", [True, False]) +def test_label_binarizer_pandas_nullable(dtype, unique_first): + """Checks that LabelBinarizer works with pandas nullable dtypes. + + Non-regression test for gh-25637. + """ + pd = pytest.importorskip("pandas") + + y_true = pd.Series([1, 0, 0, 1, 0, 1, 1, 0, 1], dtype=dtype) + if unique_first: + # Calling unique creates a pandas array which has a different interface + # compared to a pandas Series. Specifically, pandas arrays do not have "iloc". + y_true = y_true.unique() + lb = LabelBinarizer().fit(y_true) + y_out = lb.transform([1, 0]) + + assert_array_equal(y_out, [[1], [0]]) + + +@ignore_warnings +def test_label_binarizer_errors(): + # Check that invalid arguments yield ValueError + one_class = np.array([0, 0, 0, 0]) + lb = LabelBinarizer().fit(one_class) + + multi_label = [(2, 3), (0,), (0, 2)] + err_msg = "You appear to be using a legacy multi-label data representation." + with pytest.raises(ValueError, match=err_msg): + lb.transform(multi_label) + + lb = LabelBinarizer() + err_msg = "This LabelBinarizer instance is not fitted yet" + with pytest.raises(ValueError, match=err_msg): + lb.transform([]) + with pytest.raises(ValueError, match=err_msg): + lb.inverse_transform([]) + + input_labels = [0, 1, 0, 1] + err_msg = "neg_label=2 must be strictly less than pos_label=1." + lb = LabelBinarizer(neg_label=2, pos_label=1) + with pytest.raises(ValueError, match=err_msg): + lb.fit(input_labels) + err_msg = "neg_label=2 must be strictly less than pos_label=2." + lb = LabelBinarizer(neg_label=2, pos_label=2) + with pytest.raises(ValueError, match=err_msg): + lb.fit(input_labels) + err_msg = ( + "Sparse binarization is only supported with non zero pos_label and zero " + "neg_label, got pos_label=2 and neg_label=1" + ) + lb = LabelBinarizer(neg_label=1, pos_label=2, sparse_output=True) + with pytest.raises(ValueError, match=err_msg): + lb.fit(input_labels) + + # Sequence of seq type should raise ValueError + y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]] + err_msg = "You appear to be using a legacy multi-label data representation" + with pytest.raises(ValueError, match=err_msg): + LabelBinarizer().fit_transform(y_seq_of_seqs) + + # Fail on the dimension of 'binary' + err_msg = "output_type='binary', but y.shape" + with pytest.raises(ValueError, match=err_msg): + _inverse_binarize_thresholding( + y=np.array([[1, 2, 3], [2, 1, 3]]), + output_type="binary", + classes=[1, 2, 3], + threshold=0, + ) + + # Fail on multioutput data + err_msg = "Multioutput target data is not supported with label binarization" + with pytest.raises(ValueError, match=err_msg): + LabelBinarizer().fit(np.array([[1, 3], [2, 1]])) + with pytest.raises(ValueError, match=err_msg): + label_binarize(np.array([[1, 3], [2, 1]]), classes=[1, 2, 3]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_label_binarizer_sparse_errors(csr_container): + # Fail on y_type + err_msg = "foo format is not supported" + with pytest.raises(ValueError, match=err_msg): + _inverse_binarize_thresholding( + y=csr_container([[1, 2], [2, 1]]), + output_type="foo", + classes=[1, 2], + threshold=0, + ) + + # Fail on the number of classes + err_msg = "The number of class is not equal to the number of dimension of y." 
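+ # y has only two columns while three classes are supplied, so the
+ # thresholding-based inverse binarization cannot match classes to columns.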
+ with pytest.raises(ValueError, match=err_msg): + _inverse_binarize_thresholding( + y=csr_container([[1, 2], [2, 1]]), + output_type="foo", + classes=[1, 2, 3], + threshold=0, + ) + + +@pytest.mark.parametrize( + "values, classes, unknown", + [ + ( + np.array([2, 1, 3, 1, 3], dtype="int64"), + np.array([1, 2, 3], dtype="int64"), + np.array([4], dtype="int64"), + ), + ( + np.array(["b", "a", "c", "a", "c"], dtype=object), + np.array(["a", "b", "c"], dtype=object), + np.array(["d"], dtype=object), + ), + ( + np.array(["b", "a", "c", "a", "c"]), + np.array(["a", "b", "c"]), + np.array(["d"]), + ), + ], + ids=["int64", "object", "str"], +) +def test_label_encoder(values, classes, unknown): + # Test LabelEncoder's transform, fit_transform and + # inverse_transform methods + le = LabelEncoder() + le.fit(values) + assert_array_equal(le.classes_, classes) + assert_array_equal(le.transform(values), [1, 0, 2, 0, 2]) + assert_array_equal(le.inverse_transform([1, 0, 2, 0, 2]), values) + le = LabelEncoder() + ret = le.fit_transform(values) + assert_array_equal(ret, [1, 0, 2, 0, 2]) + + with pytest.raises(ValueError, match="unseen labels"): + le.transform(unknown) + + +def test_label_encoder_negative_ints(): + le = LabelEncoder() + le.fit([1, 1, 4, 5, -1, 0]) + assert_array_equal(le.classes_, [-1, 0, 1, 4, 5]) + assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]), [1, 2, 3, 3, 4, 0, 0]) + assert_array_equal( + le.inverse_transform([1, 2, 3, 3, 4, 0, 0]), [0, 1, 4, 4, 5, -1, -1] + ) + with pytest.raises(ValueError): + le.transform([0, 6]) + + +@pytest.mark.parametrize("dtype", ["str", "object"]) +def test_label_encoder_str_bad_shape(dtype): + le = LabelEncoder() + le.fit(np.array(["apple", "orange"], dtype=dtype)) + msg = "should be a 1d array" + with pytest.raises(ValueError, match=msg): + le.transform("apple") + + +def test_label_encoder_errors(): + # Check that invalid arguments yield ValueError + le = LabelEncoder() + with pytest.raises(ValueError): + le.transform([]) + with pytest.raises(ValueError): + le.inverse_transform([]) + + # Fail on unseen labels + le = LabelEncoder() + le.fit([1, 2, 3, -1, 1]) + msg = "contains previously unseen labels" + with pytest.raises(ValueError, match=msg): + le.inverse_transform([-2]) + with pytest.raises(ValueError, match=msg): + le.inverse_transform([-2, -3, -4]) + + # Fail on inverse_transform("") + msg = r"should be a 1d array.+shape \(\)" + with pytest.raises(ValueError, match=msg): + le.inverse_transform("") + + +@pytest.mark.parametrize( + "values", + [ + np.array([2, 1, 3, 1, 3], dtype="int64"), + np.array(["b", "a", "c", "a", "c"], dtype=object), + np.array(["b", "a", "c", "a", "c"]), + ], + ids=["int64", "object", "str"], +) +def test_label_encoder_empty_array(values): + le = LabelEncoder() + le.fit(values) + # test empty transform + transformed = le.transform([]) + assert_array_equal(np.array([]), transformed) + # test empty inverse transform + inverse_transformed = le.inverse_transform([]) + assert_array_equal(np.array([]), inverse_transformed) + + +def test_sparse_output_multilabel_binarizer(): + # test input as iterable of iterables + inputs = [ + lambda: [(2, 3), (1,), (1, 2)], + lambda: ({2, 3}, {1}, {1, 2}), + lambda: iter([iter((2, 3)), iter((1,)), {1, 2}]), + ] + indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) + + inverse = inputs[0]() + for sparse_output in [True, False]: + for inp in inputs: + # With fit_transform + mlb = MultiLabelBinarizer(sparse_output=sparse_output) + got = mlb.fit_transform(inp()) + assert issparse(got) == 
sparse_output + if sparse_output: + # verify CSR assumption that indices and indptr have same dtype + assert got.indices.dtype == got.indptr.dtype + got = got.toarray() + assert_array_equal(indicator_mat, got) + assert_array_equal([1, 2, 3], mlb.classes_) + assert mlb.inverse_transform(got) == inverse + + # With fit + mlb = MultiLabelBinarizer(sparse_output=sparse_output) + got = mlb.fit(inp()).transform(inp()) + assert issparse(got) == sparse_output + if sparse_output: + # verify CSR assumption that indices and indptr have same dtype + assert got.indices.dtype == got.indptr.dtype + got = got.toarray() + assert_array_equal(indicator_mat, got) + assert_array_equal([1, 2, 3], mlb.classes_) + assert mlb.inverse_transform(got) == inverse + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_output_multilabel_binarizer_errors(csr_container): + inp = iter([iter((2, 3)), iter((1,)), {1, 2}]) + mlb = MultiLabelBinarizer(sparse_output=False) + mlb.fit(inp) + with pytest.raises(ValueError): + mlb.inverse_transform( + csr_container(np.array([[0, 1, 1], [2, 0, 0], [1, 1, 0]])) + ) + + +def test_multilabel_binarizer(): + # test input as iterable of iterables + inputs = [ + lambda: [(2, 3), (1,), (1, 2)], + lambda: ({2, 3}, {1}, {1, 2}), + lambda: iter([iter((2, 3)), iter((1,)), {1, 2}]), + ] + indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) + inverse = inputs[0]() + for inp in inputs: + # With fit_transform + mlb = MultiLabelBinarizer() + got = mlb.fit_transform(inp()) + assert_array_equal(indicator_mat, got) + assert_array_equal([1, 2, 3], mlb.classes_) + assert mlb.inverse_transform(got) == inverse + + # With fit + mlb = MultiLabelBinarizer() + got = mlb.fit(inp()).transform(inp()) + assert_array_equal(indicator_mat, got) + assert_array_equal([1, 2, 3], mlb.classes_) + assert mlb.inverse_transform(got) == inverse + + +def test_multilabel_binarizer_empty_sample(): + mlb = MultiLabelBinarizer() + y = [[1, 2], [1], []] + Y = np.array([[1, 1], [1, 0], [0, 0]]) + assert_array_equal(mlb.fit_transform(y), Y) + + +def test_multilabel_binarizer_unknown_class(): + mlb = MultiLabelBinarizer() + y = [[1, 2]] + Y = np.array([[1, 0], [0, 1]]) + warning_message = "unknown class.* will be ignored" + with pytest.warns(UserWarning, match=warning_message): + matrix = mlb.fit(y).transform([[4, 1], [2, 0]]) + + Y = np.array([[1, 0, 0], [0, 1, 0]]) + mlb = MultiLabelBinarizer(classes=[1, 2, 3]) + with pytest.warns(UserWarning, match=warning_message): + matrix = mlb.fit(y).transform([[4, 1], [2, 0]]) + assert_array_equal(matrix, Y) + + +def test_multilabel_binarizer_given_classes(): + inp = [(2, 3), (1,), (1, 2)] + indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 1]]) + # fit_transform() + mlb = MultiLabelBinarizer(classes=[1, 3, 2]) + assert_array_equal(mlb.fit_transform(inp), indicator_mat) + assert_array_equal(mlb.classes_, [1, 3, 2]) + + # fit().transform() + mlb = MultiLabelBinarizer(classes=[1, 3, 2]) + assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) + assert_array_equal(mlb.classes_, [1, 3, 2]) + + # ensure works with extra class + mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2]) + assert_array_equal( + mlb.fit_transform(inp), np.hstack(([[0], [0], [0]], indicator_mat)) + ) + assert_array_equal(mlb.classes_, [4, 1, 3, 2]) + + # ensure fit is no-op as iterable is not consumed + inp = iter(inp) + mlb = MultiLabelBinarizer(classes=[1, 3, 2]) + assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) + + # ensure a ValueError is thrown if given duplicate 
classes + err_msg = ( + "The classes argument contains duplicate classes. Remove " + "these duplicates before passing them to MultiLabelBinarizer." + ) + mlb = MultiLabelBinarizer(classes=[1, 3, 2, 3]) + with pytest.raises(ValueError, match=err_msg): + mlb.fit(inp) + + +def test_multilabel_binarizer_multiple_calls(): + inp = [(2, 3), (1,), (1, 2)] + indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 1]]) + + indicator_mat2 = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) + + # first call + mlb = MultiLabelBinarizer(classes=[1, 3, 2]) + assert_array_equal(mlb.fit_transform(inp), indicator_mat) + # second call change class + mlb.classes = [1, 2, 3] + assert_array_equal(mlb.fit_transform(inp), indicator_mat2) + + +def test_multilabel_binarizer_same_length_sequence(): + # Ensure sequences of the same length are not interpreted as a 2-d array + inp = [[1], [0], [2]] + indicator_mat = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) + # fit_transform() + mlb = MultiLabelBinarizer() + assert_array_equal(mlb.fit_transform(inp), indicator_mat) + assert_array_equal(mlb.inverse_transform(indicator_mat), inp) + + # fit().transform() + mlb = MultiLabelBinarizer() + assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) + assert_array_equal(mlb.inverse_transform(indicator_mat), inp) + + +def test_multilabel_binarizer_non_integer_labels(): + tuple_classes = _to_object_array([(1,), (2,), (3,)]) + inputs = [ + ([("2", "3"), ("1",), ("1", "2")], ["1", "2", "3"]), + ([("b", "c"), ("a",), ("a", "b")], ["a", "b", "c"]), + ([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes), + ] + indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) + for inp, classes in inputs: + # fit_transform() + mlb = MultiLabelBinarizer() + inp = np.array(inp, dtype=object) + assert_array_equal(mlb.fit_transform(inp), indicator_mat) + assert_array_equal(mlb.classes_, classes) + indicator_mat_inv = np.array(mlb.inverse_transform(indicator_mat), dtype=object) + assert_array_equal(indicator_mat_inv, inp) + + # fit().transform() + mlb = MultiLabelBinarizer() + assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) + assert_array_equal(mlb.classes_, classes) + indicator_mat_inv = np.array(mlb.inverse_transform(indicator_mat), dtype=object) + assert_array_equal(indicator_mat_inv, inp) + + mlb = MultiLabelBinarizer() + with pytest.raises(TypeError): + mlb.fit_transform([({}), ({}, {"a": "b"})]) + + +def test_multilabel_binarizer_non_unique(): + inp = [(1, 1, 1, 0)] + indicator_mat = np.array([[1, 1]]) + mlb = MultiLabelBinarizer() + assert_array_equal(mlb.fit_transform(inp), indicator_mat) + + +def test_multilabel_binarizer_inverse_validation(): + inp = [(1, 1, 1, 0)] + mlb = MultiLabelBinarizer() + mlb.fit_transform(inp) + # Not binary + with pytest.raises(ValueError): + mlb.inverse_transform(np.array([[1, 3]])) + # The following binary cases are fine, however + mlb.inverse_transform(np.array([[0, 0]])) + mlb.inverse_transform(np.array([[1, 1]])) + mlb.inverse_transform(np.array([[1, 0]])) + + # Wrong shape + with pytest.raises(ValueError): + mlb.inverse_transform(np.array([[1]])) + with pytest.raises(ValueError): + mlb.inverse_transform(np.array([[1, 1, 1]])) + + +def test_label_binarize_with_class_order(): + out = label_binarize([1, 6], classes=[1, 2, 4, 6]) + expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]]) + assert_array_equal(out, expected) + + # Modified class order + out = label_binarize([1, 6], classes=[1, 6, 4, 2]) + expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]]) + assert_array_equal(out, expected) + + out = 
label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1]) + expected = np.array([[0, 0, 1, 0], [0, 0, 0, 1], [0, 1, 0, 0], [1, 0, 0, 0]]) + assert_array_equal(out, expected) + + +def check_binarized_results(y, classes, pos_label, neg_label, expected): + for sparse_output in [True, False]: + if (pos_label == 0 or neg_label != 0) and sparse_output: + with pytest.raises(ValueError): + label_binarize( + y, + classes=classes, + neg_label=neg_label, + pos_label=pos_label, + sparse_output=sparse_output, + ) + continue + + # check label_binarize + binarized = label_binarize( + y, + classes=classes, + neg_label=neg_label, + pos_label=pos_label, + sparse_output=sparse_output, + ) + assert_array_equal(toarray(binarized), expected) + assert issparse(binarized) == sparse_output + + # check inverse + y_type = type_of_target(y) + if y_type == "multiclass": + inversed = _inverse_binarize_multiclass(binarized, classes=classes) + + else: + inversed = _inverse_binarize_thresholding( + binarized, + output_type=y_type, + classes=classes, + threshold=((neg_label + pos_label) / 2.0), + ) + + assert_array_equal(toarray(inversed), toarray(y)) + + # Check label binarizer + lb = LabelBinarizer( + neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output + ) + binarized = lb.fit_transform(y) + assert_array_equal(toarray(binarized), expected) + assert issparse(binarized) == sparse_output + inverse_output = lb.inverse_transform(binarized) + assert_array_equal(toarray(inverse_output), toarray(y)) + assert issparse(inverse_output) == issparse(y) + + +def test_label_binarize_binary(): + y = [0, 1, 0] + classes = [0, 1] + pos_label = 2 + neg_label = -1 + expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1)) + + check_binarized_results(y, classes, pos_label, neg_label, expected) + + # Binary case where sparse_output = True will not result in a ValueError + y = [0, 1, 0] + classes = [0, 1] + pos_label = 3 + neg_label = 0 + expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1)) + + check_binarized_results(y, classes, pos_label, neg_label, expected) + + +def test_label_binarize_multiclass(): + y = [0, 1, 2] + classes = [0, 1, 2] + pos_label = 2 + neg_label = 0 + expected = 2 * np.eye(3) + + check_binarized_results(y, classes, pos_label, neg_label, expected) + + with pytest.raises(ValueError): + label_binarize( + y, classes=classes, neg_label=-1, pos_label=pos_label, sparse_output=True + ) + + +@pytest.mark.parametrize( + "arr_type", + [np.array] + + COO_CONTAINERS + + CSC_CONTAINERS + + CSR_CONTAINERS + + DOK_CONTAINERS + + LIL_CONTAINERS, +) +def test_label_binarize_multilabel(arr_type): + y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]]) + classes = [0, 1, 2] + pos_label = 2 + neg_label = 0 + expected = pos_label * y_ind + y = arr_type(y_ind) + + check_binarized_results(y, classes, pos_label, neg_label, expected) + + with pytest.raises(ValueError): + label_binarize( + y, classes=classes, neg_label=-1, pos_label=pos_label, sparse_output=True + ) + + +def test_invalid_input_label_binarize(): + with pytest.raises(ValueError): + label_binarize([0, 2], classes=[0, 2], pos_label=0, neg_label=1) + with pytest.raises(ValueError, match="continuous target data is not "): + label_binarize([1.2, 2.7], classes=[0, 1]) + with pytest.raises(ValueError, match="mismatch with the labels"): + label_binarize([[1, 3]], classes=[1, 2, 3]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_inverse_binarize_multiclass(csr_container): + got = _inverse_binarize_multiclass( + 
csr_container([[0, 1, 0], [-1, 0, -1], [0, 0, 0]]), np.arange(3) + ) + assert_array_equal(got, np.array([1, 1, 0])) + + +def test_nan_label_encoder(): + """Check that label encoder encodes nans in transform. + + Non-regression test for #22628. + """ + le = LabelEncoder() + le.fit(["a", "a", "b", np.nan]) + + y_trans = le.transform([np.nan]) + assert_array_equal(y_trans, [2]) + + +@pytest.mark.parametrize( + "encoder", [LabelEncoder(), LabelBinarizer(), MultiLabelBinarizer()] +) +def test_label_encoders_do_not_have_set_output(encoder): + """Check that label encoders do not define set_output and work with y as a kwarg. + + Non-regression test for #26854. + """ + assert not hasattr(encoder, "set_output") + y_encoded_with_kwarg = encoder.fit_transform(y=["a", "b", "c"]) + y_encoded_positional = encoder.fit_transform(["a", "b", "c"]) + assert_array_equal(y_encoded_with_kwarg, y_encoded_positional) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_polynomial.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_polynomial.py new file mode 100644 index 0000000000000000000000000000000000000000..b97500d43ef731b47fa5788a8bc9bd8ec47fd32a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_polynomial.py @@ -0,0 +1,1258 @@ +import sys + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_equal +from scipy import sparse +from scipy.interpolate import BSpline +from scipy.sparse import random as sparse_random + +from sklearn.linear_model import LinearRegression +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import ( + KBinsDiscretizer, + PolynomialFeatures, + SplineTransformer, +) +from sklearn.preprocessing._csr_polynomial_expansion import ( + _calc_expanded_nnz, + _calc_total_nnz, + _get_sizeof_LARGEST_INT_t, +) +from sklearn.utils._testing import assert_array_almost_equal +from sklearn.utils.fixes import ( + CSC_CONTAINERS, + CSR_CONTAINERS, + parse_version, + sp_version, +) + + +@pytest.mark.parametrize("est", (PolynomialFeatures, SplineTransformer)) +def test_polynomial_and_spline_array_order(est): + """Test that output array has the given order.""" + X = np.arange(10).reshape(5, 2) + + def is_c_contiguous(a): + return np.isfortran(a.T) + + assert is_c_contiguous(est().fit_transform(X)) + assert is_c_contiguous(est(order="C").fit_transform(X)) + assert np.isfortran(est(order="F").fit_transform(X)) + + +@pytest.mark.parametrize( + "params, err_msg", + [ + ({"knots": [[1]]}, r"Number of knots, knots.shape\[0\], must be >= 2."), + ({"knots": [[1, 1], [2, 2]]}, r"knots.shape\[1\] == n_features is violated"), + ({"knots": [[1], [0]]}, "knots must be sorted without duplicates."), + ], +) +def test_spline_transformer_input_validation(params, err_msg): + """Test that we raise errors for invalid input in SplineTransformer.""" + X = [[1], [2]] + + with pytest.raises(ValueError, match=err_msg): + SplineTransformer(**params).fit(X) + + +@pytest.mark.parametrize("extrapolation", ["continue", "periodic"]) +def test_spline_transformer_integer_knots(extrapolation): + """Test that SplineTransformer accepts integer value knot positions.""" + X = np.arange(20).reshape(10, 2) + knots = [[0, 1], [1, 2], [5, 5], [11, 10], [12, 11]] + _ = SplineTransformer( + degree=3, knots=knots, extrapolation=extrapolation + ).fit_transform(X) + + +def test_spline_transformer_feature_names(): + """Test that SplineTransformer generates correct features name.""" + X = 
np.arange(20).reshape(10, 2) + splt = SplineTransformer(n_knots=3, degree=3, include_bias=True).fit(X) + feature_names = splt.get_feature_names_out() + assert_array_equal( + feature_names, + [ + "x0_sp_0", + "x0_sp_1", + "x0_sp_2", + "x0_sp_3", + "x0_sp_4", + "x1_sp_0", + "x1_sp_1", + "x1_sp_2", + "x1_sp_3", + "x1_sp_4", + ], + ) + + splt = SplineTransformer(n_knots=3, degree=3, include_bias=False).fit(X) + feature_names = splt.get_feature_names_out(["a", "b"]) + assert_array_equal( + feature_names, + [ + "a_sp_0", + "a_sp_1", + "a_sp_2", + "a_sp_3", + "b_sp_0", + "b_sp_1", + "b_sp_2", + "b_sp_3", + ], + ) + + +@pytest.mark.parametrize( + "extrapolation", + ["constant", "linear", "continue", "periodic"], +) +@pytest.mark.parametrize("degree", [2, 3]) +def test_split_transform_feature_names_extrapolation_degree(extrapolation, degree): + """Test feature names are correct for different extrapolations and degree. + + Non-regression test for gh-25292. + """ + X = np.arange(20).reshape(10, 2) + splt = SplineTransformer(degree=degree, extrapolation=extrapolation).fit(X) + feature_names = splt.get_feature_names_out(["a", "b"]) + assert len(feature_names) == splt.n_features_out_ + + X_trans = splt.transform(X) + assert X_trans.shape[1] == len(feature_names) + + +@pytest.mark.parametrize("degree", range(1, 5)) +@pytest.mark.parametrize("n_knots", range(3, 5)) +@pytest.mark.parametrize("knots", ["uniform", "quantile"]) +@pytest.mark.parametrize("extrapolation", ["constant", "periodic"]) +def test_spline_transformer_unity_decomposition(degree, n_knots, knots, extrapolation): + """Test that B-splines are indeed a decomposition of unity. + + Splines basis functions must sum up to 1 per row, if we stay in between boundaries. + """ + X = np.linspace(0, 1, 100)[:, None] + # make the boundaries 0 and 1 part of X_train, for sure. 
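The partition-of-unity property asserted by this test can be seen directly with a small hedged sketch (default uniform knots, points kept inside the training range):

    import numpy as np
    from sklearn.preprocessing import SplineTransformer

    X = np.linspace(0, 1, 7)[:, None]
    Xt = SplineTransformer(n_knots=4, degree=3, include_bias=True).fit_transform(X)
    # Within the knot boundaries the B-spline basis functions sum to 1 per row.
    print(np.allclose(Xt.sum(axis=1), 1.0))   # True
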
+ X_train = np.r_[[[0]], X[::2, :], [[1]]] + X_test = X[1::2, :] + + if extrapolation == "periodic": + n_knots = n_knots + degree # periodic splines require degree < n_knots + + splt = SplineTransformer( + n_knots=n_knots, + degree=degree, + knots=knots, + include_bias=True, + extrapolation=extrapolation, + ) + splt.fit(X_train) + for X in [X_train, X_test]: + assert_allclose(np.sum(splt.transform(X), axis=1), 1) + + +@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)]) +def test_spline_transformer_linear_regression(bias, intercept): + """Test that B-splines fit a sinusodial curve pretty well.""" + X = np.linspace(0, 10, 100)[:, None] + y = np.sin(X[:, 0]) + 2 # +2 to avoid the value 0 in assert_allclose + pipe = Pipeline( + steps=[ + ( + "spline", + SplineTransformer( + n_knots=15, + degree=3, + include_bias=bias, + extrapolation="constant", + ), + ), + ("ols", LinearRegression(fit_intercept=intercept)), + ] + ) + pipe.fit(X, y) + assert_allclose(pipe.predict(X), y, rtol=1e-3) + + +@pytest.mark.parametrize( + ["knots", "n_knots", "sample_weight", "expected_knots"], + [ + ("uniform", 3, None, np.array([[0, 2], [3, 8], [6, 14]])), + ( + "uniform", + 3, + np.array([0, 0, 1, 1, 0, 3, 1]), + np.array([[2, 2], [4, 8], [6, 14]]), + ), + ("uniform", 4, None, np.array([[0, 2], [2, 6], [4, 10], [6, 14]])), + ("quantile", 3, None, np.array([[0, 2], [3, 3], [6, 14]])), + ( + "quantile", + 3, + np.array([0, 0, 1, 1, 0, 3, 1]), + np.array([[2, 2], [5, 8], [6, 14]]), + ), + ], +) +def test_spline_transformer_get_base_knot_positions( + knots, n_knots, sample_weight, expected_knots +): + """Check the behaviour to find knot positions with and without sample_weight.""" + X = np.array([[0, 2], [0, 2], [2, 2], [3, 3], [4, 6], [5, 8], [6, 14]]) + base_knots = SplineTransformer._get_base_knot_positions( + X=X, knots=knots, n_knots=n_knots, sample_weight=sample_weight + ) + assert_allclose(base_knots, expected_knots) + + +@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)]) +def test_spline_transformer_periodic_linear_regression(bias, intercept): + """Test that B-splines fit a periodic curve pretty well.""" + + # "+ 3" to avoid the value 0 in assert_allclose + def f(x): + return np.sin(2 * np.pi * x) - np.sin(8 * np.pi * x) + 3 + + X = np.linspace(0, 1, 101)[:, None] + pipe = Pipeline( + steps=[ + ( + "spline", + SplineTransformer( + n_knots=20, + degree=3, + include_bias=bias, + extrapolation="periodic", + ), + ), + ("ols", LinearRegression(fit_intercept=intercept)), + ] + ) + pipe.fit(X, f(X[:, 0])) + + # Generate larger array to check periodic extrapolation + X_ = np.linspace(-1, 2, 301)[:, None] + predictions = pipe.predict(X_) + assert_allclose(predictions, f(X_[:, 0]), atol=0.01, rtol=0.01) + assert_allclose(predictions[0:100], predictions[100:200], rtol=1e-3) + + +def test_spline_transformer_periodic_spline_backport(): + """Test that the backport of extrapolate="periodic" works correctly""" + X = np.linspace(-2, 3.5, 10)[:, None] + degree = 2 + + # Use periodic extrapolation backport in SplineTransformer + transformer = SplineTransformer( + degree=degree, extrapolation="periodic", knots=[[-1.0], [0.0], [1.0]] + ) + Xt = transformer.fit_transform(X) + + # Use periodic extrapolation in BSpline + coef = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]]) + spl = BSpline(np.arange(-3, 4), coef, degree, "periodic") + Xspl = spl(X[:, 0]) + assert_allclose(Xt, Xspl) + + +def test_spline_transformer_periodic_splines_periodicity(): + """Test if shifted 
knots result in the same transformation up to permutation.""" + X = np.linspace(0, 10, 101)[:, None] + + transformer_1 = SplineTransformer( + degree=3, + extrapolation="periodic", + knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]], + ) + + transformer_2 = SplineTransformer( + degree=3, + extrapolation="periodic", + knots=[[1.0], [3.0], [4.0], [5.0], [8.0], [9.0]], + ) + + Xt_1 = transformer_1.fit_transform(X) + Xt_2 = transformer_2.fit_transform(X) + + assert_allclose(Xt_1, Xt_2[:, [4, 0, 1, 2, 3]]) + + +@pytest.mark.parametrize("degree", [3, 5]) +def test_spline_transformer_periodic_splines_smoothness(degree): + """Test that spline transformation is smooth at first / last knot.""" + X = np.linspace(-2, 10, 10_000)[:, None] + + transformer = SplineTransformer( + degree=degree, + extrapolation="periodic", + knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]], + ) + Xt = transformer.fit_transform(X) + + delta = (X.max() - X.min()) / len(X) + tol = 10 * delta + + dXt = Xt + # We expect splines of degree `degree` to be (`degree`-1) times + # continuously differentiable. I.e. for d = 0, ..., `degree` - 1 the d-th + # derivative should be continuous. This is the case if the (d+1)-th + # numerical derivative is reasonably small (smaller than `tol` in absolute + # value). We thus compute d-th numeric derivatives for d = 1, ..., `degree` + # and compare them to `tol`. + # + # Note that the 0-th derivative is the function itself, such that we are + # also checking its continuity. + for d in range(1, degree + 1): + # Check continuity of the (d-1)-th derivative + diff = np.diff(dXt, axis=0) + assert np.abs(diff).max() < tol + # Compute d-th numeric derivative + dXt = diff / delta + + # As degree `degree` splines are not `degree` times continuously + # differentiable at the knots, the `degree + 1`-th numeric derivative + # should have spikes at the knots. 
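To make the numeric-derivative argument concrete, a hedged miniature version with degree-1 (piecewise linear) splines: the basis itself is continuous, but its slope jumps at the interior knot, so the second numeric derivative spikes there.

    import numpy as np
    from sklearn.preprocessing import SplineTransformer

    X = np.linspace(0.0, 1.0, 1001)[:, None]
    delta = 1.0 / 1000
    Xt = SplineTransformer(n_knots=3, degree=1).fit_transform(X)
    d0 = np.diff(Xt, axis=0)            # continuity of the basis itself
    d1 = d0 / delta                     # first numeric derivative
    print(np.abs(d0).max() < 10 * delta)            # True: basis is continuous
    print(np.abs(np.diff(d1, axis=0)).max() > 1)    # True: slope jumps at the knot
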
+ diff = np.diff(dXt, axis=0) + assert np.abs(diff).max() > 1 + + +@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)]) +@pytest.mark.parametrize("degree", [1, 2, 3, 4, 5]) +def test_spline_transformer_extrapolation(bias, intercept, degree): + """Test that B-spline extrapolation works correctly.""" + # we use a straight line for that + X = np.linspace(-1, 1, 100)[:, None] + y = X.squeeze() + + # 'constant' + pipe = Pipeline( + [ + [ + "spline", + SplineTransformer( + n_knots=4, + degree=degree, + include_bias=bias, + extrapolation="constant", + ), + ], + ["ols", LinearRegression(fit_intercept=intercept)], + ] + ) + pipe.fit(X, y) + assert_allclose(pipe.predict([[-10], [5]]), [-1, 1]) + + # 'linear' + pipe = Pipeline( + [ + [ + "spline", + SplineTransformer( + n_knots=4, + degree=degree, + include_bias=bias, + extrapolation="linear", + ), + ], + ["ols", LinearRegression(fit_intercept=intercept)], + ] + ) + pipe.fit(X, y) + assert_allclose(pipe.predict([[-10], [5]]), [-10, 5]) + + # 'error' + splt = SplineTransformer( + n_knots=4, degree=degree, include_bias=bias, extrapolation="error" + ) + splt.fit(X) + msg = "X contains values beyond the limits of the knots" + with pytest.raises(ValueError, match=msg): + splt.transform([[-10]]) + with pytest.raises(ValueError, match=msg): + splt.transform([[5]]) + + +def test_spline_transformer_kbindiscretizer(): + """Test that a B-spline of degree=0 is equivalent to KBinsDiscretizer.""" + rng = np.random.RandomState(97531) + X = rng.randn(200).reshape(200, 1) + n_bins = 5 + n_knots = n_bins + 1 + + splt = SplineTransformer( + n_knots=n_knots, degree=0, knots="quantile", include_bias=True + ) + splines = splt.fit_transform(X) + + kbd = KBinsDiscretizer(n_bins=n_bins, encode="onehot-dense", strategy="quantile") + kbins = kbd.fit_transform(X) + + # Though they should be exactly equal, we test approximately with high + # accuracy. 
+ assert_allclose(splines, kbins, rtol=1e-13) + + +@pytest.mark.skipif( + sp_version < parse_version("1.8.0"), + reason="The option `sparse_output` is available as of scipy 1.8.0", +) +@pytest.mark.parametrize("degree", range(1, 3)) +@pytest.mark.parametrize("knots", ["uniform", "quantile"]) +@pytest.mark.parametrize( + "extrapolation", ["error", "constant", "linear", "continue", "periodic"] +) +@pytest.mark.parametrize("include_bias", [False, True]) +def test_spline_transformer_sparse_output( + degree, knots, extrapolation, include_bias, global_random_seed +): + rng = np.random.RandomState(global_random_seed) + X = rng.randn(200).reshape(40, 5) + + splt_dense = SplineTransformer( + degree=degree, + knots=knots, + extrapolation=extrapolation, + include_bias=include_bias, + sparse_output=False, + ) + splt_sparse = SplineTransformer( + degree=degree, + knots=knots, + extrapolation=extrapolation, + include_bias=include_bias, + sparse_output=True, + ) + + splt_dense.fit(X) + splt_sparse.fit(X) + + X_trans_sparse = splt_sparse.transform(X) + X_trans_dense = splt_dense.transform(X) + assert sparse.issparse(X_trans_sparse) and X_trans_sparse.format == "csr" + assert_allclose(X_trans_dense, X_trans_sparse.toarray()) + + # extrapolation regime + X_min = np.amin(X, axis=0) + X_max = np.amax(X, axis=0) + X_extra = np.r_[ + np.linspace(X_min - 5, X_min, 10), np.linspace(X_max, X_max + 5, 10) + ] + if extrapolation == "error": + msg = "X contains values beyond the limits of the knots" + with pytest.raises(ValueError, match=msg): + splt_dense.transform(X_extra) + msg = "Out of bounds" + with pytest.raises(ValueError, match=msg): + splt_sparse.transform(X_extra) + else: + assert_allclose( + splt_dense.transform(X_extra), splt_sparse.transform(X_extra).toarray() + ) + + +@pytest.mark.skipif( + sp_version >= parse_version("1.8.0"), + reason="The option `sparse_output` is available as of scipy 1.8.0", +) +def test_spline_transformer_sparse_output_raise_error_for_old_scipy(): + """Test that SplineTransformer with sparse=True raises for scipy<1.8.0.""" + X = [[1], [2]] + with pytest.raises(ValueError, match="scipy>=1.8.0"): + SplineTransformer(sparse_output=True).fit(X) + + +@pytest.mark.parametrize("n_knots", [5, 10]) +@pytest.mark.parametrize("include_bias", [True, False]) +@pytest.mark.parametrize("degree", [3, 4]) +@pytest.mark.parametrize( + "extrapolation", ["error", "constant", "linear", "continue", "periodic"] +) +@pytest.mark.parametrize("sparse_output", [False, True]) +def test_spline_transformer_n_features_out( + n_knots, include_bias, degree, extrapolation, sparse_output +): + """Test that transform results in n_features_out_ features.""" + if sparse_output and sp_version < parse_version("1.8.0"): + pytest.skip("The option `sparse_output` is available as of scipy 1.8.0") + + splt = SplineTransformer( + n_knots=n_knots, + degree=degree, + include_bias=include_bias, + extrapolation=extrapolation, + sparse_output=sparse_output, + ) + X = np.linspace(0, 1, 10)[:, None] + splt.fit(X) + + assert splt.transform(X).shape[1] == splt.n_features_out_ + + +@pytest.mark.parametrize( + "params, err_msg", + [ + ({"degree": (-1, 2)}, r"degree=\(min_degree, max_degree\) must"), + ({"degree": (0, 1.5)}, r"degree=\(min_degree, max_degree\) must"), + ({"degree": (3, 2)}, r"degree=\(min_degree, max_degree\) must"), + ({"degree": (1, 2, 3)}, r"int or tuple \(min_degree, max_degree\)"), + ], +) +def test_polynomial_features_input_validation(params, err_msg): + """Test that we raise errors for invalid input in 
PolynomialFeatures.""" + X = [[1], [2]] + + with pytest.raises(ValueError, match=err_msg): + PolynomialFeatures(**params).fit(X) + + +@pytest.fixture() +def single_feature_degree3(): + X = np.arange(6)[:, np.newaxis] + P = np.hstack([np.ones_like(X), X, X**2, X**3]) + return X, P + + +@pytest.mark.parametrize( + "degree, include_bias, interaction_only, indices", + [ + (3, True, False, slice(None, None)), + (3, False, False, slice(1, None)), + (3, True, True, [0, 1]), + (3, False, True, [1]), + ((2, 3), True, False, [0, 2, 3]), + ((2, 3), False, False, [2, 3]), + ((2, 3), True, True, [0]), + ((2, 3), False, True, []), + ], +) +@pytest.mark.parametrize("X_container", [None] + CSR_CONTAINERS + CSC_CONTAINERS) +def test_polynomial_features_one_feature( + single_feature_degree3, + degree, + include_bias, + interaction_only, + indices, + X_container, +): + """Test PolynomialFeatures on single feature up to degree 3.""" + X, P = single_feature_degree3 + if X_container is not None: + X = X_container(X) + tf = PolynomialFeatures( + degree=degree, include_bias=include_bias, interaction_only=interaction_only + ).fit(X) + out = tf.transform(X) + if X_container is not None: + out = out.toarray() + assert_allclose(out, P[:, indices]) + if tf.n_output_features_ > 0: + assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_) + + +@pytest.fixture() +def two_features_degree3(): + X = np.arange(6).reshape((3, 2)) + x1 = X[:, :1] + x2 = X[:, 1:] + P = np.hstack( + [ + x1**0 * x2**0, # 0 + x1**1 * x2**0, # 1 + x1**0 * x2**1, # 2 + x1**2 * x2**0, # 3 + x1**1 * x2**1, # 4 + x1**0 * x2**2, # 5 + x1**3 * x2**0, # 6 + x1**2 * x2**1, # 7 + x1**1 * x2**2, # 8 + x1**0 * x2**3, # 9 + ] + ) + return X, P + + +@pytest.mark.parametrize( + "degree, include_bias, interaction_only, indices", + [ + (2, True, False, slice(0, 6)), + (2, False, False, slice(1, 6)), + (2, True, True, [0, 1, 2, 4]), + (2, False, True, [1, 2, 4]), + ((2, 2), True, False, [0, 3, 4, 5]), + ((2, 2), False, False, [3, 4, 5]), + ((2, 2), True, True, [0, 4]), + ((2, 2), False, True, [4]), + (3, True, False, slice(None, None)), + (3, False, False, slice(1, None)), + (3, True, True, [0, 1, 2, 4]), + (3, False, True, [1, 2, 4]), + ((2, 3), True, False, [0, 3, 4, 5, 6, 7, 8, 9]), + ((2, 3), False, False, slice(3, None)), + ((2, 3), True, True, [0, 4]), + ((2, 3), False, True, [4]), + ((3, 3), True, False, [0, 6, 7, 8, 9]), + ((3, 3), False, False, [6, 7, 8, 9]), + ((3, 3), True, True, [0]), + ((3, 3), False, True, []), # would need 3 input features + ], +) +@pytest.mark.parametrize("X_container", [None] + CSR_CONTAINERS + CSC_CONTAINERS) +def test_polynomial_features_two_features( + two_features_degree3, + degree, + include_bias, + interaction_only, + indices, + X_container, +): + """Test PolynomialFeatures on 2 features up to degree 3.""" + X, P = two_features_degree3 + if X_container is not None: + X = X_container(X) + tf = PolynomialFeatures( + degree=degree, include_bias=include_bias, interaction_only=interaction_only + ).fit(X) + out = tf.transform(X) + if X_container is not None: + out = out.toarray() + assert_allclose(out, P[:, indices]) + if tf.n_output_features_ > 0: + assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_) + + +def test_polynomial_feature_names(): + X = np.arange(30).reshape(10, 3) + poly = PolynomialFeatures(degree=2, include_bias=True).fit(X) + feature_names = poly.get_feature_names_out() + assert_array_equal( + ["1", "x0", "x1", "x2", "x0^2", "x0 x1", "x0 x2", "x1^2", "x1 x2", "x2^2"], + feature_names, 
+ ) + assert len(feature_names) == poly.transform(X).shape[1] + + poly = PolynomialFeatures(degree=3, include_bias=False).fit(X) + feature_names = poly.get_feature_names_out(["a", "b", "c"]) + assert_array_equal( + [ + "a", + "b", + "c", + "a^2", + "a b", + "a c", + "b^2", + "b c", + "c^2", + "a^3", + "a^2 b", + "a^2 c", + "a b^2", + "a b c", + "a c^2", + "b^3", + "b^2 c", + "b c^2", + "c^3", + ], + feature_names, + ) + assert len(feature_names) == poly.transform(X).shape[1] + + poly = PolynomialFeatures(degree=(2, 3), include_bias=False).fit(X) + feature_names = poly.get_feature_names_out(["a", "b", "c"]) + assert_array_equal( + [ + "a^2", + "a b", + "a c", + "b^2", + "b c", + "c^2", + "a^3", + "a^2 b", + "a^2 c", + "a b^2", + "a b c", + "a c^2", + "b^3", + "b^2 c", + "b c^2", + "c^3", + ], + feature_names, + ) + assert len(feature_names) == poly.transform(X).shape[1] + + poly = PolynomialFeatures( + degree=(3, 3), include_bias=True, interaction_only=True + ).fit(X) + feature_names = poly.get_feature_names_out(["a", "b", "c"]) + assert_array_equal(["1", "a b c"], feature_names) + assert len(feature_names) == poly.transform(X).shape[1] + + # test some unicode + poly = PolynomialFeatures(degree=1, include_bias=True).fit(X) + feature_names = poly.get_feature_names_out(["\u0001F40D", "\u262e", "\u05d0"]) + assert_array_equal(["1", "\u0001F40D", "\u262e", "\u05d0"], feature_names) + + +@pytest.mark.parametrize( + ["deg", "include_bias", "interaction_only", "dtype"], + [ + (1, True, False, int), + (2, True, False, int), + (2, True, False, np.float32), + (2, True, False, np.float64), + (3, False, False, np.float64), + (3, False, True, np.float64), + (4, False, False, np.float64), + (4, False, True, np.float64), + ], +) +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_polynomial_features_csc_X( + deg, include_bias, interaction_only, dtype, csc_container +): + rng = np.random.RandomState(0) + X = rng.randint(0, 2, (100, 2)) + X_csc = csc_container(X) + + est = PolynomialFeatures( + deg, include_bias=include_bias, interaction_only=interaction_only + ) + Xt_csc = est.fit_transform(X_csc.astype(dtype)) + Xt_dense = est.fit_transform(X.astype(dtype)) + + assert sparse.issparse(Xt_csc) and Xt_csc.format == "csc" + assert Xt_csc.dtype == Xt_dense.dtype + assert_array_almost_equal(Xt_csc.toarray(), Xt_dense) + + +@pytest.mark.parametrize( + ["deg", "include_bias", "interaction_only", "dtype"], + [ + (1, True, False, int), + (2, True, False, int), + (2, True, False, np.float32), + (2, True, False, np.float64), + (3, False, False, np.float64), + (3, False, True, np.float64), + ], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_polynomial_features_csr_X( + deg, include_bias, interaction_only, dtype, csr_container +): + rng = np.random.RandomState(0) + X = rng.randint(0, 2, (100, 2)) + X_csr = csr_container(X) + + est = PolynomialFeatures( + deg, include_bias=include_bias, interaction_only=interaction_only + ) + Xt_csr = est.fit_transform(X_csr.astype(dtype)) + Xt_dense = est.fit_transform(X.astype(dtype, copy=False)) + + assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr" + assert Xt_csr.dtype == Xt_dense.dtype + assert_array_almost_equal(Xt_csr.toarray(), Xt_dense) + + +@pytest.mark.parametrize("n_features", [1, 4, 5]) +@pytest.mark.parametrize( + "min_degree, max_degree", [(0, 1), (0, 2), (1, 3), (0, 4), (3, 4)] +) +@pytest.mark.parametrize("interaction_only", [True, False]) +@pytest.mark.parametrize("include_bias", [True, False]) 
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_num_combinations( + n_features, min_degree, max_degree, interaction_only, include_bias, csr_container +): + """ + Test that n_output_features_ is calculated correctly. + """ + x = csr_container(([1], ([0], [n_features - 1]))) + est = PolynomialFeatures( + degree=max_degree, + interaction_only=interaction_only, + include_bias=include_bias, + ) + est.fit(x) + num_combos = est.n_output_features_ + + combos = PolynomialFeatures._combinations( + n_features=n_features, + min_degree=0, + max_degree=max_degree, + interaction_only=interaction_only, + include_bias=include_bias, + ) + assert num_combos == sum([1 for _ in combos]) + + +@pytest.mark.parametrize( + ["deg", "include_bias", "interaction_only", "dtype"], + [ + (2, True, False, np.float32), + (2, True, False, np.float64), + (3, False, False, np.float64), + (3, False, True, np.float64), + ], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_polynomial_features_csr_X_floats( + deg, include_bias, interaction_only, dtype, csr_container +): + X_csr = csr_container(sparse_random(1000, 10, 0.5, random_state=0)) + X = X_csr.toarray() + + est = PolynomialFeatures( + deg, include_bias=include_bias, interaction_only=interaction_only + ) + Xt_csr = est.fit_transform(X_csr.astype(dtype)) + Xt_dense = est.fit_transform(X.astype(dtype)) + + assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr" + assert Xt_csr.dtype == Xt_dense.dtype + assert_array_almost_equal(Xt_csr.toarray(), Xt_dense) + + +@pytest.mark.parametrize( + ["zero_row_index", "deg", "interaction_only"], + [ + (0, 2, True), + (1, 2, True), + (2, 2, True), + (0, 3, True), + (1, 3, True), + (2, 3, True), + (0, 2, False), + (1, 2, False), + (2, 2, False), + (0, 3, False), + (1, 3, False), + (2, 3, False), + ], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_polynomial_features_csr_X_zero_row( + zero_row_index, deg, interaction_only, csr_container +): + X_csr = csr_container(sparse_random(3, 10, 1.0, random_state=0)) + X_csr[zero_row_index, :] = 0.0 + X = X_csr.toarray() + + est = PolynomialFeatures(deg, include_bias=False, interaction_only=interaction_only) + Xt_csr = est.fit_transform(X_csr) + Xt_dense = est.fit_transform(X) + + assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr" + assert Xt_csr.dtype == Xt_dense.dtype + assert_array_almost_equal(Xt_csr.toarray(), Xt_dense) + + +# This degree should always be one more than the highest degree supported by +# _csr_expansion. 
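For orientation, a small hedged sketch of the sparse behaviour that these CSR/CSC tests compare against the dense path (public PolynomialFeatures API, toy data):

    import numpy as np
    from scipy import sparse
    from sklearn.preprocessing import PolynomialFeatures

    X = np.array([[1.0, 2.0], [3.0, 4.0]])
    pf = PolynomialFeatures(degree=2, include_bias=False)
    Xt_dense = pf.fit_transform(X)                    # ndarray with 5 columns
    Xt_csr = pf.fit_transform(sparse.csr_matrix(X))   # sparse input stays sparse
    print(Xt_csr.format)                              # 'csr'
    print(np.allclose(Xt_csr.toarray(), Xt_dense))    # True
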
+@pytest.mark.parametrize( + ["include_bias", "interaction_only"], + [(True, True), (True, False), (False, True), (False, False)], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_polynomial_features_csr_X_degree_4( + include_bias, interaction_only, csr_container +): + X_csr = csr_container(sparse_random(1000, 10, 0.5, random_state=0)) + X = X_csr.toarray() + + est = PolynomialFeatures( + 4, include_bias=include_bias, interaction_only=interaction_only + ) + Xt_csr = est.fit_transform(X_csr) + Xt_dense = est.fit_transform(X) + + assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr" + assert Xt_csr.dtype == Xt_dense.dtype + assert_array_almost_equal(Xt_csr.toarray(), Xt_dense) + + +@pytest.mark.parametrize( + ["deg", "dim", "interaction_only"], + [ + (2, 1, True), + (2, 2, True), + (3, 1, True), + (3, 2, True), + (3, 3, True), + (2, 1, False), + (2, 2, False), + (3, 1, False), + (3, 2, False), + (3, 3, False), + ], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_polynomial_features_csr_X_dim_edges(deg, dim, interaction_only, csr_container): + X_csr = csr_container(sparse_random(1000, dim, 0.5, random_state=0)) + X = X_csr.toarray() + + est = PolynomialFeatures(deg, interaction_only=interaction_only) + Xt_csr = est.fit_transform(X_csr) + Xt_dense = est.fit_transform(X) + + assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr" + assert Xt_csr.dtype == Xt_dense.dtype + assert_array_almost_equal(Xt_csr.toarray(), Xt_dense) + + +@pytest.mark.parametrize("interaction_only", [True, False]) +@pytest.mark.parametrize("include_bias", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_csr_polynomial_expansion_index_overflow_non_regression( + interaction_only, include_bias, csr_container +): + """Check the automatic index dtype promotion to `np.int64` when needed. + + This ensures that sufficiently large input configurations get + properly promoted to use `np.int64` for index and indptr representation + while preserving data integrity. Non-regression test for gh-16803. + + Note that this is only possible for Python runtimes with a 64 bit address + space. On 32 bit platforms, a `ValueError` is raised instead. + """ + + def degree_2_calc(d, i, j): + if interaction_only: + return d * i - (i**2 + 3 * i) // 2 - 1 + j + else: + return d * i - (i**2 + i) // 2 + j + + n_samples = 13 + n_features = 120001 + data_dtype = np.float32 + data = np.arange(1, 5, dtype=np.int64) + row = np.array([n_samples - 2, n_samples - 2, n_samples - 1, n_samples - 1]) + # An int64 dtype is required to avoid overflow error on Windows within the + # `degree_2_calc` function. + col = np.array( + [n_features - 2, n_features - 1, n_features - 2, n_features - 1], dtype=np.int64 + ) + X = csr_container( + (data, (row, col)), + shape=(n_samples, n_features), + dtype=data_dtype, + ) + pf = PolynomialFeatures( + interaction_only=interaction_only, include_bias=include_bias, degree=2 + ) + + # Calculate the number of combinations a-priori, and if needed check for + # the correct ValueError and terminate the test early. 
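The combination count referred to here follows the usual stars-and-bars formula for monomials of bounded total degree; a hedged check against the public attribute, using a small n_features for illustration:

    from math import comb
    import numpy as np
    from sklearn.preprocessing import PolynomialFeatures

    n_features, degree = 5, 3
    pf = PolynomialFeatures(degree=degree, include_bias=True).fit(np.ones((1, n_features)))
    # Monomials of total degree <= 3 in 5 variables (bias included): C(5 + 3, 3) = 56
    print(pf.n_output_features_, comb(n_features + degree, degree))   # 56 56
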
+ num_combinations = pf._num_combinations( + n_features=n_features, + min_degree=0, + max_degree=2, + interaction_only=pf.interaction_only, + include_bias=pf.include_bias, + ) + if num_combinations > np.iinfo(np.intp).max: + msg = ( + r"The output that would result from the current configuration would have" + r" \d* features which is too large to be indexed" + ) + with pytest.raises(ValueError, match=msg): + pf.fit(X) + return + X_trans = pf.fit_transform(X) + row_nonzero, col_nonzero = X_trans.nonzero() + n_degree_1_features_out = n_features + include_bias + max_degree_2_idx = ( + degree_2_calc(n_features, col[int(not interaction_only)], col[1]) + + n_degree_1_features_out + ) + + # Account for bias of all samples except last one which will be handled + # separately since there are distinct data values before it + data_target = [1] * (n_samples - 2) if include_bias else [] + col_nonzero_target = [0] * (n_samples - 2) if include_bias else [] + + for i in range(2): + x = data[2 * i] + y = data[2 * i + 1] + x_idx = col[2 * i] + y_idx = col[2 * i + 1] + if include_bias: + data_target.append(1) + col_nonzero_target.append(0) + data_target.extend([x, y]) + col_nonzero_target.extend( + [x_idx + int(include_bias), y_idx + int(include_bias)] + ) + if not interaction_only: + data_target.extend([x * x, x * y, y * y]) + col_nonzero_target.extend( + [ + degree_2_calc(n_features, x_idx, x_idx) + n_degree_1_features_out, + degree_2_calc(n_features, x_idx, y_idx) + n_degree_1_features_out, + degree_2_calc(n_features, y_idx, y_idx) + n_degree_1_features_out, + ] + ) + else: + data_target.extend([x * y]) + col_nonzero_target.append( + degree_2_calc(n_features, x_idx, y_idx) + n_degree_1_features_out + ) + + nnz_per_row = int(include_bias) + 3 + 2 * int(not interaction_only) + + assert pf.n_output_features_ == max_degree_2_idx + 1 + assert X_trans.dtype == data_dtype + assert X_trans.shape == (n_samples, max_degree_2_idx + 1) + assert X_trans.indptr.dtype == X_trans.indices.dtype == np.int64 + # Ensure that dtype promotion was actually required: + assert X_trans.indices.max() > np.iinfo(np.int32).max + + row_nonzero_target = list(range(n_samples - 2)) if include_bias else [] + row_nonzero_target.extend( + [n_samples - 2] * nnz_per_row + [n_samples - 1] * nnz_per_row + ) + + assert_allclose(X_trans.data, data_target) + assert_array_equal(row_nonzero, row_nonzero_target) + assert_array_equal(col_nonzero, col_nonzero_target) + + +@pytest.mark.parametrize( + "degree, n_features", + [ + # Needs promotion to int64 when interaction_only=False + (2, 65535), + (3, 2344), + # This guarantees that the intermediate operation when calculating + # output columns would overflow a C-long, hence checks that python- + # longs are being used. + (2, int(np.sqrt(np.iinfo(np.int64).max) + 1)), + (3, 65535), + # This case tests the second clause of the overflow check which + # takes into account the value of `n_features` itself. + (2, int(np.sqrt(np.iinfo(np.int64).max))), + ], +) +@pytest.mark.parametrize("interaction_only", [True, False]) +@pytest.mark.parametrize("include_bias", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_csr_polynomial_expansion_index_overflow( + degree, n_features, interaction_only, include_bias, csr_container +): + """Tests known edge-cases to the dtype promotion strategy and custom + Cython code, including a current bug in the upstream + `scipy.sparse.hstack`. 
+ """ + data = [1.0] + row = [0] + col = [n_features - 1] + + # First degree index + expected_indices = [ + n_features - 1 + int(include_bias), + ] + # Second degree index + expected_indices.append(n_features * (n_features + 1) // 2 + expected_indices[0]) + # Third degree index + expected_indices.append( + n_features * (n_features + 1) * (n_features + 2) // 6 + expected_indices[1] + ) + + X = csr_container((data, (row, col))) + pf = PolynomialFeatures( + interaction_only=interaction_only, include_bias=include_bias, degree=degree + ) + + # Calculate the number of combinations a-priori, and if needed check for + # the correct ValueError and terminate the test early. + num_combinations = pf._num_combinations( + n_features=n_features, + min_degree=0, + max_degree=degree, + interaction_only=pf.interaction_only, + include_bias=pf.include_bias, + ) + if num_combinations > np.iinfo(np.intp).max: + msg = ( + r"The output that would result from the current configuration would have" + r" \d* features which is too large to be indexed" + ) + with pytest.raises(ValueError, match=msg): + pf.fit(X) + return + + # In SciPy < 1.8, a bug occurs when an intermediate matrix in + # `to_stack` in `hstack` fits within int32 however would require int64 when + # combined with all previous matrices in `to_stack`. + if sp_version < parse_version("1.8.0"): + has_bug = False + max_int32 = np.iinfo(np.int32).max + cumulative_size = n_features + include_bias + for deg in range(2, degree + 1): + max_indptr = _calc_total_nnz(X.indptr, interaction_only, deg) + max_indices = _calc_expanded_nnz(n_features, interaction_only, deg) - 1 + cumulative_size += max_indices + 1 + needs_int64 = max(max_indices, max_indptr) > max_int32 + has_bug |= not needs_int64 and cumulative_size > max_int32 + if has_bug: + msg = r"In scipy versions `<1.8.0`, the function `scipy.sparse.hstack`" + with pytest.raises(ValueError, match=msg): + X_trans = pf.fit_transform(X) + return + + # When `n_features>=65535`, `scipy.sparse.hstack` may not use the right + # dtype for representing indices and indptr if `n_features` is still + # small enough so that each block matrix's indices and indptr arrays + # can be represented with `np.int32`. We test `n_features==65535` + # since it is guaranteed to run into this bug. 
+ if ( + sp_version < parse_version("1.9.2") + and n_features == 65535 + and degree == 2 + and not interaction_only + ): # pragma: no cover + msg = r"In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`" + with pytest.raises(ValueError, match=msg): + X_trans = pf.fit_transform(X) + return + X_trans = pf.fit_transform(X) + + expected_dtype = np.int64 if num_combinations > np.iinfo(np.int32).max else np.int32 + # Terms higher than first degree + non_bias_terms = 1 + (degree - 1) * int(not interaction_only) + expected_nnz = int(include_bias) + non_bias_terms + assert X_trans.dtype == X.dtype + assert X_trans.shape == (1, pf.n_output_features_) + assert X_trans.indptr.dtype == X_trans.indices.dtype == expected_dtype + assert X_trans.nnz == expected_nnz + + if include_bias: + assert X_trans[0, 0] == pytest.approx(1.0) + for idx in range(non_bias_terms): + assert X_trans[0, expected_indices[idx]] == pytest.approx(1.0) + + offset = interaction_only * n_features + if degree == 3: + offset *= 1 + n_features + assert pf.n_output_features_ == expected_indices[degree - 1] + 1 - offset + + +@pytest.mark.parametrize("interaction_only", [True, False]) +@pytest.mark.parametrize("include_bias", [True, False]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_csr_polynomial_expansion_too_large_to_index( + interaction_only, include_bias, csr_container +): + n_features = np.iinfo(np.int64).max // 2 + data = [1.0] + row = [0] + col = [n_features - 1] + X = csr_container((data, (row, col))) + pf = PolynomialFeatures( + interaction_only=interaction_only, include_bias=include_bias, degree=(2, 2) + ) + msg = ( + r"The output that would result from the current configuration would have \d*" + r" features which is too large to be indexed" + ) + with pytest.raises(ValueError, match=msg): + pf.fit(X) + with pytest.raises(ValueError, match=msg): + pf.fit_transform(X) + + +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +def test_polynomial_features_behaviour_on_zero_degree(sparse_container): + """Check that PolynomialFeatures raises error when degree=0 and include_bias=False, + and output a single constant column when include_bias=True + """ + X = np.ones((10, 2)) + poly = PolynomialFeatures(degree=0, include_bias=False) + err_msg = ( + "Setting degree to zero and include_bias to False would result in" + " an empty output array." + ) + with pytest.raises(ValueError, match=err_msg): + poly.fit_transform(X) + + poly = PolynomialFeatures(degree=(0, 0), include_bias=False) + err_msg = ( + "Setting both min_degree and max_degree to zero and include_bias to" + " False would result in an empty output array." 
+ ) + with pytest.raises(ValueError, match=err_msg): + poly.fit_transform(X) + + for _X in [X, sparse_container(X)]: + poly = PolynomialFeatures(degree=0, include_bias=True) + output = poly.fit_transform(_X) + # convert to dense array if needed + if sparse.issparse(output): + output = output.toarray() + assert_array_equal(output, np.ones((X.shape[0], 1))) + + +def test_sizeof_LARGEST_INT_t(): + # On Windows, scikit-learn is typically compiled with MSVC that + # does not support int128 arithmetic (at the time of writing): + # https://stackoverflow.com/a/6761962/163740 + if sys.platform == "win32" or ( + sys.maxsize <= 2**32 and sys.platform != "emscripten" + ): + expected_size = 8 + else: + expected_size = 16 + + assert _get_sizeof_LARGEST_INT_t() == expected_size + + +@pytest.mark.xfail( + sys.platform == "win32", + reason=( + "On Windows, scikit-learn is typically compiled with MSVC that does not support" + " int128 arithmetic (at the time of writing)" + ), + run=True, +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_csr_polynomial_expansion_windows_fail(csr_container): + # Minimum needed to ensure integer overflow occurs while guaranteeing an + # int64-indexable output. + n_features = int(np.iinfo(np.int64).max ** (1 / 3) + 3) + data = [1.0] + row = [0] + col = [n_features - 1] + + # First degree index + expected_indices = [ + n_features - 1, + ] + # Second degree index + expected_indices.append( + int(n_features * (n_features + 1) // 2 + expected_indices[0]) + ) + # Third degree index + expected_indices.append( + int(n_features * (n_features + 1) * (n_features + 2) // 6 + expected_indices[1]) + ) + + X = csr_container((data, (row, col))) + pf = PolynomialFeatures(interaction_only=False, include_bias=False, degree=3) + if sys.maxsize <= 2**32: + msg = ( + r"The output that would result from the current configuration would" + r" have \d*" + r" features which is too large to be indexed" + ) + with pytest.raises(ValueError, match=msg): + pf.fit_transform(X) + else: + X_trans = pf.fit_transform(X) + for idx in range(3): + assert X_trans[0, expected_indices[idx]] == pytest.approx(1.0) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_target_encoder.py b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_target_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..81b0f32d04d685883f8b2cad08e7df02bcc77edd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_target_encoder.py @@ -0,0 +1,716 @@ +import re + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_equal + +from sklearn.ensemble import RandomForestRegressor +from sklearn.linear_model import Ridge +from sklearn.model_selection import ( + KFold, + ShuffleSplit, + StratifiedKFold, + cross_val_score, + train_test_split, +) +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import ( + KBinsDiscretizer, + LabelBinarizer, + LabelEncoder, + TargetEncoder, +) + + +def _encode_target(X_ordinal, y_numeric, n_categories, smooth): + """Simple Python implementation of target encoding.""" + cur_encodings = np.zeros(n_categories, dtype=np.float64) + y_mean = np.mean(y_numeric) + + if smooth == "auto": + y_variance = np.var(y_numeric) + for c in range(n_categories): + y_subset = y_numeric[X_ordinal == c] + n_i = y_subset.shape[0] + + if n_i == 0: + cur_encodings[c] = y_mean + continue + + y_subset_variance = np.var(y_subset) + m = 
y_subset_variance / y_variance + lambda_ = n_i / (n_i + m) + + cur_encodings[c] = lambda_ * np.mean(y_subset) + (1 - lambda_) * y_mean + return cur_encodings + else: # float + for c in range(n_categories): + y_subset = y_numeric[X_ordinal == c] + current_sum = np.sum(y_subset) + y_mean * smooth + current_cnt = y_subset.shape[0] + smooth + cur_encodings[c] = current_sum / current_cnt + return cur_encodings + + +@pytest.mark.parametrize( + "categories, unknown_value", + [ + ([np.array([0, 1, 2], dtype=np.int64)], 4), + ([np.array([1.0, 3.0, np.nan], dtype=np.float64)], 6.0), + ([np.array(["cat", "dog", "snake"], dtype=object)], "bear"), + ("auto", 3), + ], +) +@pytest.mark.parametrize("smooth", [5.0, "auto"]) +@pytest.mark.parametrize("target_type", ["binary", "continuous"]) +def test_encoding(categories, unknown_value, global_random_seed, smooth, target_type): + """Check encoding for binary and continuous targets. + + Compare the values returned by `TargetEncoder.fit_transform` against the + expected encodings for cv splits from a naive reference Python + implementation in _encode_target. + """ + + n_categories = 3 + X_train_int_array = np.array([[0] * 20 + [1] * 30 + [2] * 40], dtype=np.int64).T + X_test_int_array = np.array([[0, 1, 2]], dtype=np.int64).T + n_samples = X_train_int_array.shape[0] + + if categories == "auto": + X_train = X_train_int_array + X_test = X_test_int_array + else: + X_train = categories[0][X_train_int_array] + X_test = categories[0][X_test_int_array] + + X_test = np.concatenate((X_test, [[unknown_value]])) + + data_rng = np.random.RandomState(global_random_seed) + n_splits = 3 + if target_type == "binary": + y_numeric = data_rng.randint(low=0, high=2, size=n_samples) + target_names = np.array(["cat", "dog"], dtype=object) + y_train = target_names[y_numeric] + + else: + assert target_type == "continuous" + y_numeric = data_rng.uniform(low=-10, high=20, size=n_samples) + y_train = y_numeric + + shuffled_idx = data_rng.permutation(n_samples) + X_train_int_array = X_train_int_array[shuffled_idx] + X_train = X_train[shuffled_idx] + y_train = y_train[shuffled_idx] + y_numeric = y_numeric[shuffled_idx] + + # Define our CV splitting strategy + if target_type == "binary": + cv = StratifiedKFold( + n_splits=n_splits, random_state=global_random_seed, shuffle=True + ) + else: + cv = KFold(n_splits=n_splits, random_state=global_random_seed, shuffle=True) + + # Compute the expected values using our reference Python implementation of + # target encoding: + expected_X_fit_transform = np.empty_like(X_train_int_array, dtype=np.float64) + + for train_idx, test_idx in cv.split(X_train_int_array, y_train): + X_, y_ = X_train_int_array[train_idx, 0], y_numeric[train_idx] + cur_encodings = _encode_target(X_, y_, n_categories, smooth) + expected_X_fit_transform[test_idx, 0] = cur_encodings[ + X_train_int_array[test_idx, 0] + ] + + # Check that we can obtain the same encodings by calling `fit_transform` on + # the estimator with the same CV parameters: + target_encoder = TargetEncoder( + smooth=smooth, + categories=categories, + cv=n_splits, + random_state=global_random_seed, + ) + + X_fit_transform = target_encoder.fit_transform(X_train, y_train) + + assert target_encoder.target_type_ == target_type + assert_allclose(X_fit_transform, expected_X_fit_transform) + assert len(target_encoder.encodings_) == 1 + if target_type == "binary": + assert_array_equal(target_encoder.classes_, target_names) + else: + assert target_encoder.classes_ is None + + # compute encodings for all data to validate 
`transform` + y_mean = np.mean(y_numeric) + expected_encodings = _encode_target( + X_train_int_array[:, 0], y_numeric, n_categories, smooth + ) + assert_allclose(target_encoder.encodings_[0], expected_encodings) + assert target_encoder.target_mean_ == pytest.approx(y_mean) + + # Transform on test data, the last value is unknown so it is encoded as the target + # mean + expected_X_test_transform = np.concatenate( + (expected_encodings, np.array([y_mean])) + ).reshape(-1, 1) + + X_test_transform = target_encoder.transform(X_test) + assert_allclose(X_test_transform, expected_X_test_transform) + + +@pytest.mark.parametrize( + "categories, unknown_values", + [ + ([np.array([0, 1, 2], dtype=np.int64)], "auto"), + ([np.array(["cat", "dog", "snake"], dtype=object)], ["bear", "rabbit"]), + ], +) +@pytest.mark.parametrize( + "target_labels", [np.array([1, 2, 3]), np.array(["a", "b", "c"])] +) +@pytest.mark.parametrize("smooth", [5.0, "auto"]) +def test_encoding_multiclass( + global_random_seed, categories, unknown_values, target_labels, smooth +): + """Check encoding for multiclass targets.""" + rng = np.random.RandomState(global_random_seed) + + n_samples = 80 + n_features = 2 + feat_1_int = np.array(rng.randint(low=0, high=2, size=n_samples)) + feat_2_int = np.array(rng.randint(low=0, high=3, size=n_samples)) + feat_1 = categories[0][feat_1_int] + feat_2 = categories[0][feat_2_int] + X_train = np.column_stack((feat_1, feat_2)) + X_train_int = np.column_stack((feat_1_int, feat_2_int)) + categories_ = [[0, 1], [0, 1, 2]] + + n_classes = 3 + y_train_int = np.array(rng.randint(low=0, high=n_classes, size=n_samples)) + y_train = target_labels[y_train_int] + y_train_enc = LabelBinarizer().fit_transform(y_train) + + n_splits = 3 + cv = StratifiedKFold( + n_splits=n_splits, random_state=global_random_seed, shuffle=True + ) + + # Manually compute encodings for cv splits to validate `fit_transform` + expected_X_fit_transform = np.empty( + (X_train_int.shape[0], X_train_int.shape[1] * n_classes), + dtype=np.float64, + ) + for f_idx, cats in enumerate(categories_): + for c_idx in range(n_classes): + for train_idx, test_idx in cv.split(X_train, y_train): + y_class = y_train_enc[:, c_idx] + X_, y_ = X_train_int[train_idx, f_idx], y_class[train_idx] + current_encoding = _encode_target(X_, y_, len(cats), smooth) + # f_idx: 0, 0, 0, 1, 1, 1 + # c_idx: 0, 1, 2, 0, 1, 2 + # exp_idx: 0, 1, 2, 3, 4, 5 + exp_idx = c_idx + (f_idx * n_classes) + expected_X_fit_transform[test_idx, exp_idx] = current_encoding[ + X_train_int[test_idx, f_idx] + ] + + target_encoder = TargetEncoder( + smooth=smooth, + cv=n_splits, + random_state=global_random_seed, + ) + X_fit_transform = target_encoder.fit_transform(X_train, y_train) + + assert target_encoder.target_type_ == "multiclass" + assert_allclose(X_fit_transform, expected_X_fit_transform) + + # Manually compute encoding to validate `transform` + expected_encodings = [] + for f_idx, cats in enumerate(categories_): + for c_idx in range(n_classes): + y_class = y_train_enc[:, c_idx] + current_encoding = _encode_target( + X_train_int[:, f_idx], y_class, len(cats), smooth + ) + expected_encodings.append(current_encoding) + + assert len(target_encoder.encodings_) == n_features * n_classes + for i in range(n_features * n_classes): + assert_allclose(target_encoder.encodings_[i], expected_encodings[i]) + assert_array_equal(target_encoder.classes_, target_labels) + + # Include unknown values at the end + X_test_int = np.array([[0, 1], [1, 2], [4, 5]]) + if unknown_values == "auto": + X_test = 
X_test_int + else: + X_test = np.empty_like(X_test_int[:-1, :], dtype=object) + for column_idx in range(X_test_int.shape[1]): + X_test[:, column_idx] = categories[0][X_test_int[:-1, column_idx]] + # Add unknown values at end + X_test = np.vstack((X_test, unknown_values)) + + y_mean = np.mean(y_train_enc, axis=0) + expected_X_test_transform = np.empty( + (X_test_int.shape[0], X_test_int.shape[1] * n_classes), + dtype=np.float64, + ) + n_rows = X_test_int.shape[0] + f_idx = [0, 0, 0, 1, 1, 1] + # Last row are unknowns, dealt with later + for row_idx in range(n_rows - 1): + for i, enc in enumerate(expected_encodings): + expected_X_test_transform[row_idx, i] = enc[X_test_int[row_idx, f_idx[i]]] + + # Unknowns encoded as target mean for each class + # `y_mean` contains target mean for each class, thus cycle through mean of + # each class, `n_features` times + mean_idx = [0, 1, 2, 0, 1, 2] + for i in range(n_classes * n_features): + expected_X_test_transform[n_rows - 1, i] = y_mean[mean_idx[i]] + + X_test_transform = target_encoder.transform(X_test) + assert_allclose(X_test_transform, expected_X_test_transform) + + +@pytest.mark.parametrize( + "X, categories", + [ + ( + np.array([[0] * 10 + [1] * 10 + [3]], dtype=np.int64).T, # 3 is unknown + [[0, 1, 2]], + ), + ( + np.array( + [["cat"] * 10 + ["dog"] * 10 + ["snake"]], dtype=object + ).T, # snake is unknown + [["dog", "cat", "cow"]], + ), + ], +) +@pytest.mark.parametrize("smooth", [4.0, "auto"]) +def test_custom_categories(X, categories, smooth): + """Custom categories with unknown categories that are not in training data.""" + rng = np.random.RandomState(0) + y = rng.uniform(low=-10, high=20, size=X.shape[0]) + enc = TargetEncoder(categories=categories, smooth=smooth, random_state=0).fit(X, y) + + # The last element is unknown and encoded as the mean + y_mean = y.mean() + X_trans = enc.transform(X[-1:]) + assert X_trans[0, 0] == pytest.approx(y_mean) + + assert len(enc.encodings_) == 1 + # custom category that is not in training data + assert enc.encodings_[0][-1] == pytest.approx(y_mean) + + +@pytest.mark.parametrize( + "y, msg", + [ + ([1, 2, 0, 1], "Found input variables with inconsistent"), + ( + np.array([[1, 2, 0], [1, 2, 3]]).T, + "Target type was inferred to be 'multiclass-multioutput'", + ), + ], +) +def test_errors(y, msg): + """Check invalidate input.""" + X = np.array([[1, 0, 1]]).T + + enc = TargetEncoder() + with pytest.raises(ValueError, match=msg): + enc.fit_transform(X, y) + + +def test_use_regression_target(): + """Check inferred and specified `target_type` on regression target.""" + X = np.array([[0, 1, 0, 1, 0, 1]]).T + y = np.array([1.0, 2.0, 3.0, 2.0, 3.0, 4.0]) + + enc = TargetEncoder(cv=2) + with pytest.warns( + UserWarning, + match=re.escape( + "The least populated class in y has only 1 members, which is less than" + " n_splits=2." 
+ ), + ): + enc.fit_transform(X, y) + assert enc.target_type_ == "multiclass" + + enc = TargetEncoder(cv=2, target_type="continuous") + enc.fit_transform(X, y) + assert enc.target_type_ == "continuous" + + +@pytest.mark.parametrize( + "y, feature_names", + [ + ([1, 2] * 10, ["A", "B"]), + ([1, 2, 3] * 6 + [1, 2], ["A_1", "A_2", "A_3", "B_1", "B_2", "B_3"]), + ( + ["y1", "y2", "y3"] * 6 + ["y1", "y2"], + ["A_y1", "A_y2", "A_y3", "B_y1", "B_y2", "B_y3"], + ), + ], +) +def test_feature_names_out_set_output(y, feature_names): + """Check TargetEncoder works with set_output.""" + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"A": ["a", "b"] * 10, "B": [1, 2] * 10}) + + enc_default = TargetEncoder(cv=2, smooth=3.0, random_state=0) + enc_default.set_output(transform="default") + enc_pandas = TargetEncoder(cv=2, smooth=3.0, random_state=0) + enc_pandas.set_output(transform="pandas") + + X_default = enc_default.fit_transform(X_df, y) + X_pandas = enc_pandas.fit_transform(X_df, y) + + assert_allclose(X_pandas.to_numpy(), X_default) + assert_array_equal(enc_pandas.get_feature_names_out(), feature_names) + assert_array_equal(enc_pandas.get_feature_names_out(), X_pandas.columns) + + +@pytest.mark.parametrize("to_pandas", [True, False]) +@pytest.mark.parametrize("smooth", [1.0, "auto"]) +@pytest.mark.parametrize("target_type", ["binary-ints", "binary-str", "continuous"]) +def test_multiple_features_quick(to_pandas, smooth, target_type): + """Check target encoder with multiple features.""" + X_ordinal = np.array( + [[1, 1], [0, 1], [1, 1], [2, 1], [1, 0], [0, 1], [1, 0], [0, 0]], dtype=np.int64 + ) + if target_type == "binary-str": + y_train = np.array(["a", "b", "a", "a", "b", "b", "a", "b"]) + y_integer = LabelEncoder().fit_transform(y_train) + cv = StratifiedKFold(2, random_state=0, shuffle=True) + elif target_type == "binary-ints": + y_train = np.array([3, 4, 3, 3, 3, 4, 4, 4]) + y_integer = LabelEncoder().fit_transform(y_train) + cv = StratifiedKFold(2, random_state=0, shuffle=True) + else: + y_train = np.array([3.0, 5.1, 2.4, 3.5, 4.1, 5.5, 10.3, 7.3], dtype=np.float32) + y_integer = y_train + cv = KFold(2, random_state=0, shuffle=True) + y_mean = np.mean(y_integer) + categories = [[0, 1, 2], [0, 1]] + + X_test = np.array( + [ + [0, 1], + [3, 0], # 3 is unknown + [1, 10], # 10 is unknown + ], + dtype=np.int64, + ) + + if to_pandas: + pd = pytest.importorskip("pandas") + # convert second feature to an object + X_train = pd.DataFrame( + { + "feat0": X_ordinal[:, 0], + "feat1": np.array(["cat", "dog"], dtype=object)[X_ordinal[:, 1]], + } + ) + # "snake" is unknown + X_test = pd.DataFrame({"feat0": X_test[:, 0], "feat1": ["dog", "cat", "snake"]}) + else: + X_train = X_ordinal + + # manually compute encoding for fit_transform + expected_X_fit_transform = np.empty_like(X_ordinal, dtype=np.float64) + for f_idx, cats in enumerate(categories): + for train_idx, test_idx in cv.split(X_ordinal, y_integer): + X_, y_ = X_ordinal[train_idx, f_idx], y_integer[train_idx] + current_encoding = _encode_target(X_, y_, len(cats), smooth) + expected_X_fit_transform[test_idx, f_idx] = current_encoding[ + X_ordinal[test_idx, f_idx] + ] + + # manually compute encoding for transform + expected_encodings = [] + for f_idx, cats in enumerate(categories): + current_encoding = _encode_target( + X_ordinal[:, f_idx], y_integer, len(cats), smooth + ) + expected_encodings.append(current_encoding) + + expected_X_test_transform = np.array( + [ + [expected_encodings[0][0], expected_encodings[1][1]], + [y_mean, 
expected_encodings[1][0]], + [expected_encodings[0][1], y_mean], + ], + dtype=np.float64, + ) + + enc = TargetEncoder(smooth=smooth, cv=2, random_state=0) + X_fit_transform = enc.fit_transform(X_train, y_train) + assert_allclose(X_fit_transform, expected_X_fit_transform) + + assert len(enc.encodings_) == 2 + for i in range(2): + assert_allclose(enc.encodings_[i], expected_encodings[i]) + + X_test_transform = enc.transform(X_test) + assert_allclose(X_test_transform, expected_X_test_transform) + + +@pytest.mark.parametrize( + "y, y_mean", + [ + (np.array([3.4] * 20), 3.4), + (np.array([0] * 20), 0), + (np.array(["a"] * 20, dtype=object), 0), + ], + ids=["continuous", "binary", "binary-string"], +) +@pytest.mark.parametrize("smooth", ["auto", 4.0, 0.0]) +def test_constant_target_and_feature(y, y_mean, smooth): + """Check edge case where feature and target is constant.""" + X = np.array([[1] * 20]).T + n_samples = X.shape[0] + + enc = TargetEncoder(cv=2, smooth=smooth, random_state=0) + X_trans = enc.fit_transform(X, y) + assert_allclose(X_trans, np.repeat([[y_mean]], n_samples, axis=0)) + assert enc.encodings_[0][0] == pytest.approx(y_mean) + assert enc.target_mean_ == pytest.approx(y_mean) + + X_test = np.array([[1], [0]]) + X_test_trans = enc.transform(X_test) + assert_allclose(X_test_trans, np.repeat([[y_mean]], 2, axis=0)) + + +def test_fit_transform_not_associated_with_y_if_ordinal_categorical_is_not( + global_random_seed, +): + cardinality = 30 # not too large, otherwise we need a very large n_samples + n_samples = 3000 + rng = np.random.RandomState(global_random_seed) + y_train = rng.normal(size=n_samples) + X_train = rng.randint(0, cardinality, size=n_samples).reshape(-1, 1) + + # Sort by y_train to attempt to cause a leak + y_sorted_indices = y_train.argsort() + y_train = y_train[y_sorted_indices] + X_train = X_train[y_sorted_indices] + + target_encoder = TargetEncoder(shuffle=True, random_state=global_random_seed) + X_encoded_train_shuffled = target_encoder.fit_transform(X_train, y_train) + + target_encoder = TargetEncoder(shuffle=False) + X_encoded_train_no_shuffled = target_encoder.fit_transform(X_train, y_train) + + # Check that no information about y_train has leaked into X_train: + regressor = RandomForestRegressor( + n_estimators=10, min_samples_leaf=20, random_state=global_random_seed + ) + + # It's impossible to learn a good predictive model on the training set when + # using the original representation X_train or the target encoded + # representation with shuffled inner CV. For the latter, no information + # about y_train has inadvertently leaked into the prior used to generate + # `X_encoded_train_shuffled`: + cv = ShuffleSplit(n_splits=50, random_state=global_random_seed) + assert cross_val_score(regressor, X_train, y_train, cv=cv).mean() < 0.1 + assert ( + cross_val_score(regressor, X_encoded_train_shuffled, y_train, cv=cv).mean() + < 0.1 + ) + + # Without the inner CV shuffling, a lot of information about y_train goes into the + # the per-fold y_train.mean() priors: shrinkage is no longer effective in this + # case and would no longer be able to prevent downstream over-fitting. 
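# Editor's note: a minimal, standalone numeric sketch (not part of the test
# suite) of the shrinkage computed by the `_encode_target` reference helper
# defined at the top of this test module. With a float `smooth`, each category
# mean is pulled toward the global target mean by `smooth` pseudo-observations;
# with `smooth="auto"`, an empirical-Bayes weight
# lambda = n_c / (n_c + var(y_c) / var(y)) is used instead.
import numpy as np

y = np.array([1.0, 2.0, 3.0, 10.0, 11.0, 12.0])
X_ordinal = np.array([0, 0, 0, 1, 1, 1])
y_mean, y_var = y.mean(), y.var()

for c in (0, 1):
    y_c = y[X_ordinal == c]
    # float smoothing: (sum + smooth * global mean) / (count + smooth)
    smooth = 5.0
    enc_float = (y_c.sum() + smooth * y_mean) / (y_c.shape[0] + smooth)
    # "auto" smoothing: shrink the per-category mean toward the global mean
    lam = y_c.shape[0] / (y_c.shape[0] + y_c.var() / y_var)
    enc_auto = lam * y_c.mean() + (1 - lam) * y_mean
    print(c, enc_float, enc_auto)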
+ assert ( + cross_val_score(regressor, X_encoded_train_no_shuffled, y_train, cv=cv).mean() + > 0.5 + ) + + +def test_smooth_zero(): + """Check edge case with zero smoothing and cv does not contain category.""" + X = np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1]]).T + y = np.array([2.1, 4.3, 1.2, 3.1, 1.0, 9.0, 10.3, 14.2, 13.3, 15.0]) + + enc = TargetEncoder(smooth=0.0, shuffle=False, cv=2) + X_trans = enc.fit_transform(X, y) + + # With cv = 2, category 0 does not exist in the second half, thus + # it will be encoded as the mean of the second half + assert_allclose(X_trans[0], np.mean(y[5:])) + + # category 1 does not exist in the first half, thus it will be encoded as + # the mean of the first half + assert_allclose(X_trans[-1], np.mean(y[:5])) + + +@pytest.mark.parametrize("smooth", [0.0, 1e3, "auto"]) +def test_invariance_of_encoding_under_label_permutation(smooth, global_random_seed): + # Check that the encoding does not depend on the integer of the value of + # the integer labels. This is quite a trivial property but it is helpful + # to understand the following test. + rng = np.random.RandomState(global_random_seed) + + # Random y and informative categorical X to make the test non-trivial when + # using smoothing. + y = rng.normal(size=1000) + n_categories = 30 + X = KBinsDiscretizer(n_bins=n_categories, encode="ordinal").fit_transform( + y.reshape(-1, 1) + ) + + X_train, X_test, y_train, y_test = train_test_split( + X, y, random_state=global_random_seed + ) + + # Shuffle the labels to make sure that the encoding is invariant to the + # permutation of the labels + permutated_labels = rng.permutation(n_categories) + X_train_permuted = permutated_labels[X_train.astype(np.int32)] + X_test_permuted = permutated_labels[X_test.astype(np.int32)] + + target_encoder = TargetEncoder(smooth=smooth, random_state=global_random_seed) + X_train_encoded = target_encoder.fit_transform(X_train, y_train) + X_test_encoded = target_encoder.transform(X_test) + + X_train_permuted_encoded = target_encoder.fit_transform(X_train_permuted, y_train) + X_test_permuted_encoded = target_encoder.transform(X_test_permuted) + + assert_allclose(X_train_encoded, X_train_permuted_encoded) + assert_allclose(X_test_encoded, X_test_permuted_encoded) + + +# TODO(1.5) remove warning filter when kbd's subsample default is changed +@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000") +@pytest.mark.parametrize("smooth", [0.0, "auto"]) +def test_target_encoding_for_linear_regression(smooth, global_random_seed): + # Check some expected statistical properties when fitting a linear + # regression model on target encoded features depending on their relation + # with that target. + + # In this test, we use the Ridge class with the "lsqr" solver and a little + # bit of regularization to implement a linear regression model that + # converges quickly for large `n_samples` and robustly in case of + # correlated features. Since we will fit this model on a mean centered + # target, we do not need to fit an intercept and this will help simplify + # the analysis with respect to the expected coefficients. + linear_regression = Ridge(alpha=1e-6, solver="lsqr", fit_intercept=False) + + # Construct a random target variable. We need a large number of samples for + # this test to be stable across all values of the random seed. + n_samples = 50_000 + rng = np.random.RandomState(global_random_seed) + y = rng.randn(n_samples) + + # Generate a single informative ordinal feature with medium cardinality. 
+ # Inject some irreducible noise to make it harder for a multivariate model + # to identify the informative feature from other pure noise features. + noise = 0.8 * rng.randn(n_samples) + n_categories = 100 + X_informative = KBinsDiscretizer( + n_bins=n_categories, + encode="ordinal", + strategy="uniform", + random_state=rng, + ).fit_transform((y + noise).reshape(-1, 1)) + + # Let's permute the labels to hide the fact that this feature is + # informative to naive linear regression model trained on the raw ordinal + # values. As highlighted in the previous test, the target encoding should be + # invariant to such a permutation. + permutated_labels = rng.permutation(n_categories) + X_informative = permutated_labels[X_informative.astype(np.int32)] + + # Generate a shuffled copy of the informative feature to destroy the + # relationship with the target. + X_shuffled = rng.permutation(X_informative) + + # Also include a very high cardinality categorical feature that is by + # itself independent of the target variable: target encoding such a feature + # without internal cross-validation should cause catastrophic overfitting + # for the downstream regressor, even with shrinkage. This kind of features + # typically represents near unique identifiers of samples. In general they + # should be removed from a machine learning datasets but here we want to + # study the ability of the default behavior of TargetEncoder to mitigate + # them automatically. + X_near_unique_categories = rng.choice( + int(0.9 * n_samples), size=n_samples, replace=True + ).reshape(-1, 1) + + # Assemble the dataset and do a train-test split: + X = np.concatenate( + [X_informative, X_shuffled, X_near_unique_categories], + axis=1, + ) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + # Let's first check that a linear regression model trained on the raw + # features underfits because of the meaning-less ordinal encoding of the + # labels. + raw_model = linear_regression.fit(X_train, y_train) + assert raw_model.score(X_train, y_train) < 0.1 + assert raw_model.score(X_test, y_test) < 0.1 + + # Now do the same with target encoding using the internal CV mechanism + # implemented when using fit_transform. + model_with_cv = make_pipeline( + TargetEncoder(smooth=smooth, random_state=rng), linear_regression + ).fit(X_train, y_train) + + # This model should be able to fit the data well and also generalise to the + # test data (assuming that the binning is fine-grained enough). The R2 + # scores are not perfect because of the noise injected during the + # generation of the unique informative feature. + coef = model_with_cv[-1].coef_ + assert model_with_cv.score(X_train, y_train) > 0.5, coef + assert model_with_cv.score(X_test, y_test) > 0.5, coef + + # The target encoder recovers the linear relationship with slope 1 between + # the target encoded unique informative predictor and the target. Since the + # target encoding of the 2 other features is not informative thanks to the + # use of internal cross-validation, the multivariate linear regressor + # assigns a coef of 1 to the first feature and 0 to the other 2. 
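# Editor's note: a condensed, standalone sketch of the scenario exercised by
# this test, with a smaller sample size chosen only for speed (the exact
# scores therefore differ from the assertions above). It contrasts the
# internal cross fitting performed by `TargetEncoder.fit_transform` with a
# plain fit-then-transform on the same data for an ID-like feature.
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import TargetEncoder

rng = np.random.RandomState(0)
n_samples = 5_000
y = rng.randn(n_samples)
# near-unique identifiers: uninformative for y by construction
X_id = rng.choice(n_samples // 2, size=(n_samples, 1))
X_train, X_test, y_train, y_test = train_test_split(X_id, y, random_state=0)

# With the internal CV of fit_transform, the encoding of the ID feature stays
# uninformative, so the downstream model cannot overfit it.
with_cv = make_pipeline(TargetEncoder(random_state=0), Ridge(alpha=1e-6))
with_cv.fit(X_train, y_train)
print("with CV   :", with_cv.score(X_train, y_train), with_cv.score(X_test, y_test))

# Fitting and transforming on the same data leaks the target into the
# encoding: the train score becomes optimistic while the test score does not
# improve.
enc = TargetEncoder(random_state=0).fit(X_train, y_train)
no_cv = Ridge(alpha=1e-6).fit(enc.transform(X_train), y_train)
print("without CV:", no_cv.score(enc.transform(X_train), y_train),
      no_cv.score(enc.transform(X_test), y_test))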
+ assert coef[0] == pytest.approx(1, abs=1e-2) + assert (np.abs(coef[1:]) < 0.2).all() + + # Let's now disable the internal cross-validation by calling fit and then + # transform separately on the training set: + target_encoder = TargetEncoder(smooth=smooth, random_state=rng).fit( + X_train, y_train + ) + X_enc_no_cv_train = target_encoder.transform(X_train) + X_enc_no_cv_test = target_encoder.transform(X_test) + model_no_cv = linear_regression.fit(X_enc_no_cv_train, y_train) + + # The linear regression model should always overfit because it assigns + # too much weight to the extremely high cardinality feature relatively to + # the informative feature. Note that this is the case even when using + # the empirical Bayes smoothing which is not enough to prevent such + # overfitting alone. + coef = model_no_cv.coef_ + assert model_no_cv.score(X_enc_no_cv_train, y_train) > 0.7, coef + assert model_no_cv.score(X_enc_no_cv_test, y_test) < 0.5, coef + + # The model overfits because it assigns too much weight to the high + # cardinality yet non-informative feature instead of the lower + # cardinality yet informative feature: + assert abs(coef[0]) < abs(coef[2]) + + +def test_pandas_copy_on_write(): + """ + Test target-encoder cython code when y is read-only. + + The numpy array underlying df["y"] is read-only when copy-on-write is enabled. + Non-regression test for gh-27879. + """ + pd = pytest.importorskip("pandas", minversion="2.0") + with pd.option_context("mode.copy_on_write", True): + df = pd.DataFrame({"x": ["a", "b", "b"], "y": [4.0, 5.0, 6.0]}) + TargetEncoder(target_type="continuous").fit(df[["x"]], df["y"]) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..126906cdde1d781b64a443df1e97787fc638a94d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/__init__.py @@ -0,0 +1,11 @@ +""" +The :mod:`sklearn.semi_supervised` module implements semi-supervised learning +algorithms. These algorithms utilize small amounts of labeled data and large +amounts of unlabeled data for classification tasks. This module includes Label +Propagation. 
+""" + +from ._label_propagation import LabelPropagation, LabelSpreading +from ._self_training import SelfTrainingClassifier + +__all__ = ["SelfTrainingClassifier", "LabelPropagation", "LabelSpreading"] diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e809a3374994d0e61ff418d9a2edfb2fefffb09e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/_label_propagation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/_label_propagation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5693868862db74ac384ed2d94f146eef8fd1e3f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/_label_propagation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/_self_training.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/_self_training.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8df9b273fde2b88bf5b4fa4161d423272cb21c0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/__pycache__/_self_training.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/_label_propagation.py b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/_label_propagation.py new file mode 100644 index 0000000000000000000000000000000000000000..1ae37d06a46f32a9ecf35b4aa5bfddd0cedf3563 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/_label_propagation.py @@ -0,0 +1,623 @@ +# coding=utf8 +""" +Label propagation in the context of this module refers to a set of +semi-supervised classification algorithms. At a high level, these algorithms +work by forming a fully-connected graph between all points given and solving +for the steady-state distribution of labels at each point. + +These algorithms perform very well in practice. The cost of running can be very +expensive, at approximately O(N^3) where N is the number of (labeled and +unlabeled) points. The theory (why they perform so well) is motivated by +intuitions from random walk algorithms and geometric relationships in the data. +For more information see the references below. + +Model Features +-------------- +Label clamping: + The algorithm tries to learn distributions of labels over the dataset given + label assignments over an initial subset. In one variant, the algorithm does + not allow for any errors in the initial assignment (hard-clamping) while + in another variant, the algorithm allows for some wiggle room for the initial + assignments, allowing them to change by a fraction alpha in each iteration + (soft-clamping). + +Kernel: + A function which projects a vector into some higher dimensional space. This + implementation supports RBF and KNN kernels. Using the RBF kernel generates + a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of + size O(k*N) which will run much faster. See the documentation for SVMs for + more info on kernels. 
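# Editor's note: a brief sketch of the kernel trade-off described above, using
# an iris setup similar to the Examples section below. kernel="rbf" builds a
# dense (n_samples, n_samples) affinity matrix, while kernel="knn" builds a
# sparse k-nearest-neighbours graph that scales to much larger datasets.
import numpy as np
from sklearn import datasets
from sklearn.semi_supervised import LabelSpreading

iris = datasets.load_iris()
rng = np.random.RandomState(42)
labels = np.copy(iris.target)
labels[rng.rand(len(iris.target)) < 0.3] = -1   # hide ~30% of the labels

dense_graph_model = LabelSpreading(kernel="rbf", gamma=20).fit(iris.data, labels)
sparse_graph_model = LabelSpreading(kernel="knn", n_neighbors=7).fit(iris.data, labels)
# fraction of samples on which the two graph constructions agree
print((dense_graph_model.transduction_ == sparse_graph_model.transduction_).mean())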
+ +Examples +-------- +>>> import numpy as np +>>> from sklearn import datasets +>>> from sklearn.semi_supervised import LabelPropagation +>>> label_prop_model = LabelPropagation() +>>> iris = datasets.load_iris() +>>> rng = np.random.RandomState(42) +>>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3 +>>> labels = np.copy(iris.target) +>>> labels[random_unlabeled_points] = -1 +>>> label_prop_model.fit(iris.data, labels) +LabelPropagation(...) + +Notes +----- +References: +[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised +Learning (2006), pp. 193-216 + +[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient +Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005 +""" + +# Authors: Clay Woolam +# Utkarsh Upadhyay +# License: BSD +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real + +import numpy as np +from scipy import sparse + +from ..base import BaseEstimator, ClassifierMixin, _fit_context +from ..exceptions import ConvergenceWarning +from ..metrics.pairwise import rbf_kernel +from ..neighbors import NearestNeighbors +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import safe_sparse_dot +from ..utils.fixes import laplacian as csgraph_laplacian +from ..utils.multiclass import check_classification_targets +from ..utils.validation import check_is_fitted + + +class BaseLabelPropagation(ClassifierMixin, BaseEstimator, metaclass=ABCMeta): + """Base class for label propagation module. + + Parameters + ---------- + kernel : {'knn', 'rbf'} or callable, default='rbf' + String identifier for kernel function to use or the kernel function + itself. Only 'rbf' and 'knn' strings are valid inputs. The function + passed should take two inputs, each of shape (n_samples, n_features), + and return a (n_samples, n_samples) shaped weight matrix. + + gamma : float, default=20 + Parameter for rbf kernel. + + n_neighbors : int, default=7 + Parameter for knn kernel. Need to be strictly positive. + + alpha : float, default=1.0 + Clamping factor. + + max_iter : int, default=30 + Change maximum number of iterations allowed. + + tol : float, default=1e-3 + Convergence tolerance: threshold to consider the system at steady + state. + + n_jobs : int, default=None + The number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. 
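# Editor's note: a minimal sketch of passing a callable kernel, as described
# in the `kernel` parameter documentation: the callable receives two
# (n_samples, n_features) arrays and must return a weight matrix between them
# (here simply an RBF kernel with a custom gamma, mirroring the callable used
# in the test suite for this module).
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.semi_supervised import LabelSpreading

X = np.array([[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]])
y = np.array([0, 1, -1])          # the third sample is unlabelled

model = LabelSpreading(kernel=lambda a, b: rbf_kernel(a, b, gamma=20))
model.fit(X, y)
print(model.transduction_)        # label inferred for the unlabelled sample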
+ """ + + _parameter_constraints: dict = { + "kernel": [StrOptions({"knn", "rbf"}), callable], + "gamma": [Interval(Real, 0, None, closed="left")], + "n_neighbors": [Interval(Integral, 0, None, closed="neither")], + "alpha": [None, Interval(Real, 0, 1, closed="neither")], + "max_iter": [Interval(Integral, 0, None, closed="neither")], + "tol": [Interval(Real, 0, None, closed="left")], + "n_jobs": [None, Integral], + } + + def __init__( + self, + kernel="rbf", + *, + gamma=20, + n_neighbors=7, + alpha=1, + max_iter=30, + tol=1e-3, + n_jobs=None, + ): + self.max_iter = max_iter + self.tol = tol + + # kernel parameters + self.kernel = kernel + self.gamma = gamma + self.n_neighbors = n_neighbors + + # clamping factor + self.alpha = alpha + + self.n_jobs = n_jobs + + def _get_kernel(self, X, y=None): + if self.kernel == "rbf": + if y is None: + return rbf_kernel(X, X, gamma=self.gamma) + else: + return rbf_kernel(X, y, gamma=self.gamma) + elif self.kernel == "knn": + if self.nn_fit is None: + self.nn_fit = NearestNeighbors( + n_neighbors=self.n_neighbors, n_jobs=self.n_jobs + ).fit(X) + if y is None: + return self.nn_fit.kneighbors_graph( + self.nn_fit._fit_X, self.n_neighbors, mode="connectivity" + ) + else: + return self.nn_fit.kneighbors(y, return_distance=False) + elif callable(self.kernel): + if y is None: + return self.kernel(X, X) + else: + return self.kernel(X, y) + + @abstractmethod + def _build_graph(self): + raise NotImplementedError( + "Graph construction must be implemented to fit a label propagation model." + ) + + def predict(self, X): + """Perform inductive inference across the model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + + Returns + ------- + y : ndarray of shape (n_samples,) + Predictions for input data. + """ + # Note: since `predict` does not accept semi-supervised labels as input, + # `fit(X, y).predict(X) != fit(X, y).transduction_`. + # Hence, `fit_predict` is not implemented. + # See https://github.com/scikit-learn/scikit-learn/pull/24898 + probas = self.predict_proba(X) + return self.classes_[np.argmax(probas, axis=1)].ravel() + + def predict_proba(self, X): + """Predict probability for each possible outcome. + + Compute the probability estimates for each single sample in X + and each possible outcome seen during training (categorical + distribution). + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + + Returns + ------- + probabilities : ndarray of shape (n_samples, n_classes) + Normalized probability distributions across + class labels. + """ + check_is_fitted(self) + + X_2d = self._validate_data( + X, + accept_sparse=["csc", "csr", "coo", "dok", "bsr", "lil", "dia"], + reset=False, + ) + weight_matrices = self._get_kernel(self.X_, X_2d) + if self.kernel == "knn": + probabilities = np.array( + [ + np.sum(self.label_distributions_[weight_matrix], axis=0) + for weight_matrix in weight_matrices + ] + ) + else: + weight_matrices = weight_matrices.T + probabilities = safe_sparse_dot(weight_matrices, self.label_distributions_) + normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T + probabilities /= normalizer + return probabilities + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit a semi-supervised label propagation model to X. + + The input samples (labeled and unlabeled) are provided by matrix X, + and target labels are provided by matrix y. 
We conventionally apply the + label -1 to unlabeled samples in matrix y in a semi-supervised + classification. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target class values with unlabeled points marked as -1. + All unlabeled samples will be transductively assigned labels + internally, which are stored in `transduction_`. + + Returns + ------- + self : object + Returns the instance itself. + """ + X, y = self._validate_data( + X, + y, + accept_sparse=["csr", "csc"], + reset=True, + ) + self.X_ = X + check_classification_targets(y) + + # actual graph construction (implementations should override this) + graph_matrix = self._build_graph() + + # label construction + # construct a categorical distribution for classification only + classes = np.unique(y) + classes = classes[classes != -1] + self.classes_ = classes + + n_samples, n_classes = len(y), len(classes) + + y = np.asarray(y) + unlabeled = y == -1 + + # initialize distributions + self.label_distributions_ = np.zeros((n_samples, n_classes)) + for label in classes: + self.label_distributions_[y == label, classes == label] = 1 + + y_static = np.copy(self.label_distributions_) + if self._variant == "propagation": + # LabelPropagation + y_static[unlabeled] = 0 + else: + # LabelSpreading + y_static *= 1 - self.alpha + + l_previous = np.zeros((self.X_.shape[0], n_classes)) + + unlabeled = unlabeled[:, np.newaxis] + if sparse.issparse(graph_matrix): + graph_matrix = graph_matrix.tocsr() + + for self.n_iter_ in range(self.max_iter): + if np.abs(self.label_distributions_ - l_previous).sum() < self.tol: + break + + l_previous = self.label_distributions_ + self.label_distributions_ = safe_sparse_dot( + graph_matrix, self.label_distributions_ + ) + + if self._variant == "propagation": + normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis] + normalizer[normalizer == 0] = 1 + self.label_distributions_ /= normalizer + self.label_distributions_ = np.where( + unlabeled, self.label_distributions_, y_static + ) + else: + # clamp + self.label_distributions_ = ( + np.multiply(self.alpha, self.label_distributions_) + y_static + ) + else: + warnings.warn( + "max_iter=%d was reached without convergence." % self.max_iter, + category=ConvergenceWarning, + ) + self.n_iter_ += 1 + + normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis] + normalizer[normalizer == 0] = 1 + self.label_distributions_ /= normalizer + + # set the transduction item + transduction = self.classes_[np.argmax(self.label_distributions_, axis=1)] + self.transduction_ = transduction.ravel() + return self + + +class LabelPropagation(BaseLabelPropagation): + """Label Propagation classifier. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + kernel : {'knn', 'rbf'} or callable, default='rbf' + String identifier for kernel function to use or the kernel function + itself. Only 'rbf' and 'knn' strings are valid inputs. The function + passed should take two inputs, each of shape (n_samples, n_features), + and return a (n_samples, n_samples) shaped weight matrix. + + gamma : float, default=20 + Parameter for rbf kernel. + + n_neighbors : int, default=7 + Parameter for knn kernel which need to be strictly positive. + + max_iter : int, default=1000 + Change maximum number of iterations allowed. 
+ + tol : float, 1e-3 + Convergence tolerance: threshold to consider the system at steady + state. + + n_jobs : int, default=None + The number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + X_ : {array-like, sparse matrix} of shape (n_samples, n_features) + Input array. + + classes_ : ndarray of shape (n_classes,) + The distinct labels used in classifying instances. + + label_distributions_ : ndarray of shape (n_samples, n_classes) + Categorical distribution for each item. + + transduction_ : ndarray of shape (n_samples) + Label assigned to each item during :term:`fit`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations run. + + See Also + -------- + LabelSpreading : Alternate label propagation strategy more robust to noise. + + References + ---------- + Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data + with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon + University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf + + Examples + -------- + >>> import numpy as np + >>> from sklearn import datasets + >>> from sklearn.semi_supervised import LabelPropagation + >>> label_prop_model = LabelPropagation() + >>> iris = datasets.load_iris() + >>> rng = np.random.RandomState(42) + >>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3 + >>> labels = np.copy(iris.target) + >>> labels[random_unlabeled_points] = -1 + >>> label_prop_model.fit(iris.data, labels) + LabelPropagation(...) + """ + + _variant = "propagation" + + _parameter_constraints: dict = {**BaseLabelPropagation._parameter_constraints} + _parameter_constraints.pop("alpha") + + def __init__( + self, + kernel="rbf", + *, + gamma=20, + n_neighbors=7, + max_iter=1000, + tol=1e-3, + n_jobs=None, + ): + super().__init__( + kernel=kernel, + gamma=gamma, + n_neighbors=n_neighbors, + max_iter=max_iter, + tol=tol, + n_jobs=n_jobs, + alpha=None, + ) + + def _build_graph(self): + """Matrix representing a fully connected graph between each sample + + This basic implementation creates a non-stochastic affinity matrix, so + class distributions will exceed 1 (normalization may be desired). + """ + if self.kernel == "knn": + self.nn_fit = None + affinity_matrix = self._get_kernel(self.X_) + normalizer = affinity_matrix.sum(axis=0) + if sparse.issparse(affinity_matrix): + affinity_matrix.data /= np.diag(np.array(normalizer)) + else: + affinity_matrix /= normalizer[:, np.newaxis] + return affinity_matrix + + def fit(self, X, y): + """Fit a semi-supervised label propagation model to X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target class values with unlabeled points marked as -1. + All unlabeled samples will be transductively assigned labels + internally, which are stored in `transduction_`. + + Returns + ------- + self : object + Returns the instance itself. 
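# Editor's note: a compact NumPy-only sketch of the hard-clamped iteration in
# the LabelPropagation branch of `BaseLabelPropagation.fit` above, together
# with the affinity normalisation from `LabelPropagation._build_graph`. For
# the symmetric RBF kernel the row and column sums coincide, so plain row
# normalisation is used here.
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

X = np.array([[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]])
y = np.array([0, 1, -1])                  # -1 marks the unlabelled sample
classes = np.array([0, 1])

W = rbf_kernel(X, X, gamma=20)
T = W / W.sum(axis=1, keepdims=True)      # row-stochastic transition matrix
Y = np.zeros((len(y), len(classes)))
for k, c in enumerate(classes):
    Y[y == c, k] = 1

L = Y.copy()
for _ in range(1000):                     # max_iter
    L_prev = L
    L = T @ L
    L /= L.sum(axis=1, keepdims=True)     # renormalise each row
    L[y != -1] = Y[y != -1]               # hard clamping of labelled points
    if np.abs(L - L_prev).sum() < 1e-3:   # tol
        break
print(classes[L.argmax(axis=1)])          # transduced labels, here [0 1 1]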
+ """ + return super().fit(X, y) + + +class LabelSpreading(BaseLabelPropagation): + """LabelSpreading model for semi-supervised learning. + + This model is similar to the basic Label Propagation algorithm, + but uses affinity matrix based on the normalized graph Laplacian + and soft clamping across the labels. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + kernel : {'knn', 'rbf'} or callable, default='rbf' + String identifier for kernel function to use or the kernel function + itself. Only 'rbf' and 'knn' strings are valid inputs. The function + passed should take two inputs, each of shape (n_samples, n_features), + and return a (n_samples, n_samples) shaped weight matrix. + + gamma : float, default=20 + Parameter for rbf kernel. + + n_neighbors : int, default=7 + Parameter for knn kernel which is a strictly positive integer. + + alpha : float, default=0.2 + Clamping factor. A value in (0, 1) that specifies the relative amount + that an instance should adopt the information from its neighbors as + opposed to its initial label. + alpha=0 means keeping the initial label information; alpha=1 means + replacing all initial information. + + max_iter : int, default=30 + Maximum number of iterations allowed. + + tol : float, default=1e-3 + Convergence tolerance: threshold to consider the system at steady + state. + + n_jobs : int, default=None + The number of parallel jobs to run. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + X_ : ndarray of shape (n_samples, n_features) + Input array. + + classes_ : ndarray of shape (n_classes,) + The distinct labels used in classifying instances. + + label_distributions_ : ndarray of shape (n_samples, n_classes) + Categorical distribution for each item. + + transduction_ : ndarray of shape (n_samples,) + Label assigned to each item during :term:`fit`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations run. + + See Also + -------- + LabelPropagation : Unregularized graph based semi-supervised learning. + + References + ---------- + `Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston, + Bernhard Schoelkopf. Learning with local and global consistency (2004) + `_ + + Examples + -------- + >>> import numpy as np + >>> from sklearn import datasets + >>> from sklearn.semi_supervised import LabelSpreading + >>> label_prop_model = LabelSpreading() + >>> iris = datasets.load_iris() + >>> rng = np.random.RandomState(42) + >>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3 + >>> labels = np.copy(iris.target) + >>> labels[random_unlabeled_points] = -1 + >>> label_prop_model.fit(iris.data, labels) + LabelSpreading(...) 
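# Editor's note: a small self-contained sketch of the soft-clamped update used
# by the spreading variant in `BaseLabelPropagation.fit` above, with the graph
# built as in `LabelSpreading._build_graph` below, checked against the closed
# form (1 - alpha) * (I - alpha * S)^-1 @ Y from Zhou et al. (2004).
import numpy as np
from scipy.sparse.csgraph import laplacian
from sklearn.metrics.pairwise import rbf_kernel

# two tight clusters; one labelled point per cluster, the rest unlabelled
X = np.array([[0.0, 0.0], [0.1, 0.0], [0.2, 0.1],
              [3.0, 3.0], [3.1, 3.0], [3.0, 3.1]])
Y = np.zeros((6, 2))
Y[0, 0] = 1          # sample 0 labelled as class 0
Y[3, 1] = 1          # sample 3 labelled as class 1
alpha = 0.2

W = rbf_kernel(X, X, gamma=20)
S = -laplacian(W, normed=True)     # negated normalised graph Laplacian
np.fill_diagonal(S, 0.0)           # diagonal zeroed, as in _build_graph

L = Y.copy()
for _ in range(100):
    L = alpha * (S @ L) + (1 - alpha) * Y          # soft clamping
closed_form = (1 - alpha) * np.linalg.inv(np.eye(6) - alpha * S) @ Y
assert np.allclose(L, closed_form)
print(L.argmax(axis=1))            # cluster structure recovered: [0 0 0 1 1 1]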
+ """ + + _variant = "spreading" + + _parameter_constraints: dict = {**BaseLabelPropagation._parameter_constraints} + _parameter_constraints["alpha"] = [Interval(Real, 0, 1, closed="neither")] + + def __init__( + self, + kernel="rbf", + *, + gamma=20, + n_neighbors=7, + alpha=0.2, + max_iter=30, + tol=1e-3, + n_jobs=None, + ): + # this one has different base parameters + super().__init__( + kernel=kernel, + gamma=gamma, + n_neighbors=n_neighbors, + alpha=alpha, + max_iter=max_iter, + tol=tol, + n_jobs=n_jobs, + ) + + def _build_graph(self): + """Graph matrix for Label Spreading computes the graph laplacian""" + # compute affinity matrix (or gram matrix) + if self.kernel == "knn": + self.nn_fit = None + n_samples = self.X_.shape[0] + affinity_matrix = self._get_kernel(self.X_) + laplacian = csgraph_laplacian(affinity_matrix, normed=True) + laplacian = -laplacian + if sparse.issparse(laplacian): + diag_mask = laplacian.row == laplacian.col + laplacian.data[diag_mask] = 0.0 + else: + laplacian.flat[:: n_samples + 1] = 0.0 # set diag to 0.0 + return laplacian diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/_self_training.py b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/_self_training.py new file mode 100644 index 0000000000000000000000000000000000000000..810447c1e6f460df424034c4a89054421f525295 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/_self_training.py @@ -0,0 +1,417 @@ +import warnings +from numbers import Integral, Real + +import numpy as np + +from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone +from ..utils import safe_mask +from ..utils._param_validation import HasMethods, Interval, StrOptions +from ..utils.metadata_routing import _RoutingNotSupportedMixin +from ..utils.metaestimators import available_if +from ..utils.validation import check_is_fitted + +__all__ = ["SelfTrainingClassifier"] + +# Authors: Oliver Rausch +# Patrice Becker +# License: BSD 3 clause + + +def _estimator_has(attr): + """Check if we can delegate a method to the underlying estimator. + + First, we check the fitted `base_estimator_` if available, otherwise we check + the unfitted `base_estimator`. We raise the original `AttributeError` if + `attr` does not exist. This function is used together with `available_if`. + """ + + def check(self): + if hasattr(self, "base_estimator_"): + getattr(self.base_estimator_, attr) + else: + getattr(self.base_estimator, attr) + + return True + + return check + + +class SelfTrainingClassifier( + _RoutingNotSupportedMixin, MetaEstimatorMixin, BaseEstimator +): + """Self-training classifier. + + This :term:`metaestimator` allows a given supervised classifier to function as a + semi-supervised classifier, allowing it to learn from unlabeled data. It + does this by iteratively predicting pseudo-labels for the unlabeled data + and adding them to the training set. + + The classifier will continue iterating until either max_iter is reached, or + no pseudo-labels were added to the training set in the previous iteration. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + base_estimator : estimator object + An estimator object implementing `fit` and `predict_proba`. + Invoking the `fit` method will fit a clone of the passed estimator, + which will be stored in the `base_estimator_` attribute. + + threshold : float, default=0.75 + The decision threshold for use with `criterion='threshold'`. + Should be in [0, 1). 
When using the `'threshold'` criterion, a + :ref:`well calibrated classifier ` should be used. + + criterion : {'threshold', 'k_best'}, default='threshold' + The selection criterion used to select which labels to add to the + training set. If `'threshold'`, pseudo-labels with prediction + probabilities above `threshold` are added to the dataset. If `'k_best'`, + the `k_best` pseudo-labels with highest prediction probabilities are + added to the dataset. When using the 'threshold' criterion, a + :ref:`well calibrated classifier ` should be used. + + k_best : int, default=10 + The amount of samples to add in each iteration. Only used when + `criterion='k_best'`. + + max_iter : int or None, default=10 + Maximum number of iterations allowed. Should be greater than or equal + to 0. If it is `None`, the classifier will continue to predict labels + until no new pseudo-labels are added, or all unlabeled samples have + been labeled. + + verbose : bool, default=False + Enable verbose output. + + Attributes + ---------- + base_estimator_ : estimator object + The fitted estimator. + + classes_ : ndarray or list of ndarray of shape (n_classes,) + Class labels for each output. (Taken from the trained + `base_estimator_`). + + transduction_ : ndarray of shape (n_samples,) + The labels used for the final fit of the classifier, including + pseudo-labels added during fit. + + labeled_iter_ : ndarray of shape (n_samples,) + The iteration in which each sample was labeled. When a sample has + iteration 0, the sample was already labeled in the original dataset. + When a sample has iteration -1, the sample was not labeled in any + iteration. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + The number of rounds of self-training, that is the number of times the + base estimator is fitted on relabeled variants of the training set. + + termination_condition_ : {'max_iter', 'no_change', 'all_labeled'} + The reason that fitting was stopped. + + - `'max_iter'`: `n_iter_` reached `max_iter`. + - `'no_change'`: no new labels were predicted. + - `'all_labeled'`: all unlabeled samples were labeled before `max_iter` + was reached. + + See Also + -------- + LabelPropagation : Label propagation classifier. + LabelSpreading : Label spreading model for semi-supervised learning. + + References + ---------- + :doi:`David Yarowsky. 1995. Unsupervised word sense disambiguation rivaling + supervised methods. In Proceedings of the 33rd annual meeting on + Association for Computational Linguistics (ACL '95). Association for + Computational Linguistics, Stroudsburg, PA, USA, 189-196. + <10.3115/981658.981684>` + + Examples + -------- + >>> import numpy as np + >>> from sklearn import datasets + >>> from sklearn.semi_supervised import SelfTrainingClassifier + >>> from sklearn.svm import SVC + >>> rng = np.random.RandomState(42) + >>> iris = datasets.load_iris() + >>> random_unlabeled_points = rng.rand(iris.target.shape[0]) < 0.3 + >>> iris.target[random_unlabeled_points] = -1 + >>> svc = SVC(probability=True, gamma="auto") + >>> self_training_model = SelfTrainingClassifier(svc) + >>> self_training_model.fit(iris.data, iris.target) + SelfTrainingClassifier(...) 
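# Editor's note: a compact sketch of one threshold-based self-training loop,
# following the structure of `SelfTrainingClassifier.fit` below: fit on the
# currently labelled samples, pseudo-label the unlabelled ones whose highest
# predicted probability exceeds `threshold`, then repeat until nothing changes.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
rng = np.random.RandomState(42)
y = y.copy()
y[rng.rand(y.shape[0]) < 0.3] = -1       # mark ~30% of samples as unlabelled

threshold = 0.75
transduction = y.copy()
has_label = transduction != -1
for _ in range(10):                      # max_iter
    clf = SVC(probability=True, gamma="auto").fit(
        X[has_label], transduction[has_label]
    )
    prob = clf.predict_proba(X[~has_label])
    confident = prob.max(axis=1) > threshold
    if not confident.any():
        break                            # "no_change"
    new_idx = np.flatnonzero(~has_label)[confident]
    transduction[new_idx] = clf.classes_[prob.argmax(axis=1)][confident]
    has_label[new_idx] = True
    if has_label.all():
        break                            # "all_labeled"
print(has_label.mean(), np.mean(transduction == load_iris().target))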
+ """ + + _estimator_type = "classifier" + + _parameter_constraints: dict = { + # We don't require `predic_proba` here to allow passing a meta-estimator + # that only exposes `predict_proba` after fitting. + "base_estimator": [HasMethods(["fit"])], + "threshold": [Interval(Real, 0.0, 1.0, closed="left")], + "criterion": [StrOptions({"threshold", "k_best"})], + "k_best": [Interval(Integral, 1, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left"), None], + "verbose": ["verbose"], + } + + def __init__( + self, + base_estimator, + threshold=0.75, + criterion="threshold", + k_best=10, + max_iter=10, + verbose=False, + ): + self.base_estimator = base_estimator + self.threshold = threshold + self.criterion = criterion + self.k_best = k_best + self.max_iter = max_iter + self.verbose = verbose + + @_fit_context( + # SelfTrainingClassifier.base_estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y): + """ + Fit self-training classifier using `X`, `y` as training data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Array representing the data. + + y : {array-like, sparse matrix} of shape (n_samples,) + Array representing the labels. Unlabeled samples should have the + label -1. + + Returns + ------- + self : object + Fitted estimator. + """ + # we need row slicing support for sparse matrices, but costly finiteness check + # can be delegated to the base estimator. + X, y = self._validate_data( + X, y, accept_sparse=["csr", "csc", "lil", "dok"], force_all_finite=False + ) + + self.base_estimator_ = clone(self.base_estimator) + + if y.dtype.kind in ["U", "S"]: + raise ValueError( + "y has dtype string. If you wish to predict on " + "string targets, use dtype object, and use -1" + " as the label for unlabeled samples." + ) + + has_label = y != -1 + + if np.all(has_label): + warnings.warn("y contains no unlabeled samples", UserWarning) + + if self.criterion == "k_best" and ( + self.k_best > X.shape[0] - np.sum(has_label) + ): + warnings.warn( + ( + "k_best is larger than the amount of unlabeled " + "samples. 
All unlabeled samples will be labeled in " + "the first iteration" + ), + UserWarning, + ) + + self.transduction_ = np.copy(y) + self.labeled_iter_ = np.full_like(y, -1) + self.labeled_iter_[has_label] = 0 + + self.n_iter_ = 0 + + while not np.all(has_label) and ( + self.max_iter is None or self.n_iter_ < self.max_iter + ): + self.n_iter_ += 1 + self.base_estimator_.fit( + X[safe_mask(X, has_label)], self.transduction_[has_label] + ) + + # Predict on the unlabeled samples + prob = self.base_estimator_.predict_proba(X[safe_mask(X, ~has_label)]) + pred = self.base_estimator_.classes_[np.argmax(prob, axis=1)] + max_proba = np.max(prob, axis=1) + + # Select new labeled samples + if self.criterion == "threshold": + selected = max_proba > self.threshold + else: + n_to_select = min(self.k_best, max_proba.shape[0]) + if n_to_select == max_proba.shape[0]: + selected = np.ones_like(max_proba, dtype=bool) + else: + # NB these are indices, not a mask + selected = np.argpartition(-max_proba, n_to_select)[:n_to_select] + + # Map selected indices into original array + selected_full = np.nonzero(~has_label)[0][selected] + + # Add newly labeled confident predictions to the dataset + self.transduction_[selected_full] = pred[selected] + has_label[selected_full] = True + self.labeled_iter_[selected_full] = self.n_iter_ + + if selected_full.shape[0] == 0: + # no changed labels + self.termination_condition_ = "no_change" + break + + if self.verbose: + print( + f"End of iteration {self.n_iter_}," + f" added {selected_full.shape[0]} new labels." + ) + + if self.n_iter_ == self.max_iter: + self.termination_condition_ = "max_iter" + if np.all(has_label): + self.termination_condition_ = "all_labeled" + + self.base_estimator_.fit( + X[safe_mask(X, has_label)], self.transduction_[has_label] + ) + self.classes_ = self.base_estimator_.classes_ + return self + + @available_if(_estimator_has("predict")) + def predict(self, X): + """Predict the classes of `X`. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Array representing the data. + + Returns + ------- + y : ndarray of shape (n_samples,) + Array with predicted labels. + """ + check_is_fitted(self) + X = self._validate_data( + X, + accept_sparse=True, + force_all_finite=False, + reset=False, + ) + return self.base_estimator_.predict(X) + + @available_if(_estimator_has("predict_proba")) + def predict_proba(self, X): + """Predict probability for each possible outcome. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Array representing the data. + + Returns + ------- + y : ndarray of shape (n_samples, n_features) + Array with prediction probabilities. + """ + check_is_fitted(self) + X = self._validate_data( + X, + accept_sparse=True, + force_all_finite=False, + reset=False, + ) + return self.base_estimator_.predict_proba(X) + + @available_if(_estimator_has("decision_function")) + def decision_function(self, X): + """Call decision function of the `base_estimator`. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Array representing the data. + + Returns + ------- + y : ndarray of shape (n_samples, n_features) + Result of the decision function of the `base_estimator`. 
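# Editor's note: a brief sketch of the `available_if(_estimator_has(...))`
# delegation used for `predict`, `predict_proba`, `decision_function`,
# `predict_log_proba` and `score` in this class. Assuming current
# scikit-learn behaviour, a delegated method is only exposed when the wrapped
# estimator itself has it, so `hasattr` can be used for feature detection.
from sklearn.linear_model import SGDClassifier
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.svm import SVC

# SVC(probability=True) provides predict_proba, so the wrapper exposes it too.
print(hasattr(SelfTrainingClassifier(SVC(probability=True)), "predict_proba"))
# A hinge-loss SGDClassifier has no predict_proba, so neither does the wrapper
# (such an estimator could not actually be self-trained; the point here is
# only the attribute delegation).
print(hasattr(SelfTrainingClassifier(SGDClassifier(loss="hinge")), "predict_proba"))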
+ """ + check_is_fitted(self) + X = self._validate_data( + X, + accept_sparse=True, + force_all_finite=False, + reset=False, + ) + return self.base_estimator_.decision_function(X) + + @available_if(_estimator_has("predict_log_proba")) + def predict_log_proba(self, X): + """Predict log probability for each possible outcome. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Array representing the data. + + Returns + ------- + y : ndarray of shape (n_samples, n_features) + Array with log prediction probabilities. + """ + check_is_fitted(self) + X = self._validate_data( + X, + accept_sparse=True, + force_all_finite=False, + reset=False, + ) + return self.base_estimator_.predict_log_proba(X) + + @available_if(_estimator_has("score")) + def score(self, X, y): + """Call score on the `base_estimator`. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Array representing the data. + + y : array-like of shape (n_samples,) + Array representing the labels. + + Returns + ------- + score : float + Result of calling score on the `base_estimator`. + """ + check_is_fitted(self) + X = self._validate_data( + X, + accept_sparse=True, + force_all_finite=False, + reset=False, + ) + return self.base_estimator_.score(X, y) diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a68aef3c79b02461a386e6d8e64a2de9f5ebcae Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/test_label_propagation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/test_label_propagation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6ecbc3684d01ddf5e51653c353a2948e2254b33 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/test_label_propagation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/test_self_training.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/test_self_training.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb7e6b5510367c94806da1ac22715eadb61d2faf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/__pycache__/test_self_training.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/test_label_propagation.py b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/test_label_propagation.py new file mode 100644 index 0000000000000000000000000000000000000000..8812c3c352a0378f2d24e336cf8b4f0f29fd42a6 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/test_label_propagation.py @@ -0,0 +1,238 @@ +""" test the label propagation module """ + +import warnings + +import numpy as np +import pytest +from scipy.sparse import issparse + +from sklearn.datasets import make_classification +from sklearn.exceptions import ConvergenceWarning +from sklearn.metrics.pairwise import rbf_kernel +from sklearn.model_selection import train_test_split +from sklearn.neighbors import NearestNeighbors +from sklearn.semi_supervised import _label_propagation as label_propagation +from sklearn.utils._testing import ( + _convert_container, + assert_allclose, + assert_array_equal, +) + +CONSTRUCTOR_TYPES = ("array", "sparse_csr", "sparse_csc") + +ESTIMATORS = [ + (label_propagation.LabelPropagation, {"kernel": "rbf"}), + (label_propagation.LabelPropagation, {"kernel": "knn", "n_neighbors": 2}), + ( + label_propagation.LabelPropagation, + {"kernel": lambda x, y: rbf_kernel(x, y, gamma=20)}, + ), + (label_propagation.LabelSpreading, {"kernel": "rbf"}), + (label_propagation.LabelSpreading, {"kernel": "knn", "n_neighbors": 2}), + ( + label_propagation.LabelSpreading, + {"kernel": lambda x, y: rbf_kernel(x, y, gamma=20)}, + ), +] + + +@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS) +def test_fit_transduction(global_dtype, Estimator, parameters): + samples = np.asarray([[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]], dtype=global_dtype) + labels = [0, 1, -1] + clf = Estimator(**parameters).fit(samples, labels) + assert clf.transduction_[2] == 1 + + +@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS) +def test_distribution(global_dtype, Estimator, parameters): + if parameters["kernel"] == "knn": + pytest.skip( + "Unstable test for this configuration: changes in k-NN ordering break it." 
+ ) + samples = np.asarray([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]], dtype=global_dtype) + labels = [0, 1, -1] + clf = Estimator(**parameters).fit(samples, labels) + assert_allclose(clf.label_distributions_[2], [0.5, 0.5], atol=1e-2) + + +@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS) +def test_predict(global_dtype, Estimator, parameters): + samples = np.asarray([[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]], dtype=global_dtype) + labels = [0, 1, -1] + clf = Estimator(**parameters).fit(samples, labels) + assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1])) + + +@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS) +def test_predict_proba(global_dtype, Estimator, parameters): + samples = np.asarray([[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]], dtype=global_dtype) + labels = [0, 1, -1] + clf = Estimator(**parameters).fit(samples, labels) + assert_allclose(clf.predict_proba([[1.0, 1.0]]), np.array([[0.5, 0.5]])) + + +@pytest.mark.parametrize("alpha", [0.1, 0.3, 0.5, 0.7, 0.9]) +@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS) +def test_label_spreading_closed_form(global_dtype, Estimator, parameters, alpha): + n_classes = 2 + X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0) + X = X.astype(global_dtype, copy=False) + y[::3] = -1 + + gamma = 0.1 + clf = label_propagation.LabelSpreading(gamma=gamma).fit(X, y) + # adopting notation from Zhou et al (2004): + S = clf._build_graph() + Y = np.zeros((len(y), n_classes + 1), dtype=X.dtype) + Y[np.arange(len(y)), y] = 1 + Y = Y[:, :-1] + + expected = np.dot(np.linalg.inv(np.eye(len(S), dtype=S.dtype) - alpha * S), Y) + expected /= expected.sum(axis=1)[:, np.newaxis] + + clf = label_propagation.LabelSpreading( + max_iter=100, alpha=alpha, tol=1e-10, gamma=gamma + ) + clf.fit(X, y) + + assert_allclose(expected, clf.label_distributions_) + + +def test_label_propagation_closed_form(global_dtype): + n_classes = 2 + X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0) + X = X.astype(global_dtype, copy=False) + y[::3] = -1 + Y = np.zeros((len(y), n_classes + 1)) + Y[np.arange(len(y)), y] = 1 + unlabelled_idx = Y[:, (-1,)].nonzero()[0] + labelled_idx = (Y[:, (-1,)] == 0).nonzero()[0] + + clf = label_propagation.LabelPropagation(max_iter=100, tol=1e-10, gamma=0.1) + clf.fit(X, y) + # adopting notation from Zhu et al 2002 + T_bar = clf._build_graph() + Tuu = T_bar[tuple(np.meshgrid(unlabelled_idx, unlabelled_idx, indexing="ij"))] + Tul = T_bar[tuple(np.meshgrid(unlabelled_idx, labelled_idx, indexing="ij"))] + Y = Y[:, :-1] + Y_l = Y[labelled_idx, :] + Y_u = np.dot(np.dot(np.linalg.inv(np.eye(Tuu.shape[0]) - Tuu), Tul), Y_l) + + expected = Y.copy() + expected[unlabelled_idx, :] = Y_u + expected /= expected.sum(axis=1)[:, np.newaxis] + + assert_allclose(expected, clf.label_distributions_, atol=1e-4) + + +@pytest.mark.parametrize("accepted_sparse_type", ["sparse_csr", "sparse_csc"]) +@pytest.mark.parametrize("index_dtype", [np.int32, np.int64]) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS) +def test_sparse_input_types( + accepted_sparse_type, index_dtype, dtype, Estimator, parameters +): + # This is non-regression test for #17085 + X = _convert_container([[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]], accepted_sparse_type) + X.data = X.data.astype(dtype, copy=False) + X.indices = X.indices.astype(index_dtype, copy=False) + X.indptr = X.indptr.astype(index_dtype, copy=False) + labels = [0, 1, -1] + clf = 
Estimator(**parameters).fit(X, labels) + assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1])) + + +@pytest.mark.parametrize("constructor_type", CONSTRUCTOR_TYPES) +def test_convergence_speed(constructor_type): + # This is a non-regression test for #5774 + X = _convert_container([[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]], constructor_type) + y = np.array([0, 1, -1]) + mdl = label_propagation.LabelSpreading(kernel="rbf", max_iter=5000) + mdl.fit(X, y) + + # this should converge quickly: + assert mdl.n_iter_ < 10 + assert_array_equal(mdl.predict(X), [0, 1, 1]) + + +def test_convergence_warning(): + # This is a non-regression test for #5774 + X = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]]) + y = np.array([0, 1, -1]) + mdl = label_propagation.LabelSpreading(kernel="rbf", max_iter=1) + warn_msg = "max_iter=1 was reached without convergence." + with pytest.warns(ConvergenceWarning, match=warn_msg): + mdl.fit(X, y) + assert mdl.n_iter_ == mdl.max_iter + + mdl = label_propagation.LabelPropagation(kernel="rbf", max_iter=1) + with pytest.warns(ConvergenceWarning, match=warn_msg): + mdl.fit(X, y) + assert mdl.n_iter_ == mdl.max_iter + + mdl = label_propagation.LabelSpreading(kernel="rbf", max_iter=500) + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + mdl.fit(X, y) + + mdl = label_propagation.LabelPropagation(kernel="rbf", max_iter=500) + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + mdl.fit(X, y) + + +@pytest.mark.parametrize( + "LabelPropagationCls", + [label_propagation.LabelSpreading, label_propagation.LabelPropagation], +) +def test_label_propagation_non_zero_normalizer(LabelPropagationCls): + # check that we don't divide by zero in case of null normalizer + # non-regression test for + # https://github.com/scikit-learn/scikit-learn/pull/15946 + # https://github.com/scikit-learn/scikit-learn/issues/9292 + X = np.array([[100.0, 100.0], [100.0, 100.0], [0.0, 0.0], [0.0, 0.0]]) + y = np.array([0, 1, -1, -1]) + mdl = LabelPropagationCls(kernel="knn", max_iter=100, n_neighbors=1) + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + mdl.fit(X, y) + + +def test_predict_sparse_callable_kernel(global_dtype): + # This is a non-regression test for #15866 + + # Custom sparse kernel (top-K RBF) + def topk_rbf(X, Y=None, n_neighbors=10, gamma=1e-5): + nn = NearestNeighbors(n_neighbors=10, metric="euclidean", n_jobs=2) + nn.fit(X) + W = -1 * nn.kneighbors_graph(Y, mode="distance").power(2) * gamma + np.exp(W.data, out=W.data) + assert issparse(W) + return W.T + + n_classes = 4 + n_samples = 500 + n_test = 10 + X, y = make_classification( + n_classes=n_classes, + n_samples=n_samples, + n_features=20, + n_informative=20, + n_redundant=0, + n_repeated=0, + random_state=0, + ) + X = X.astype(global_dtype) + + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=n_test, random_state=0 + ) + + model = label_propagation.LabelSpreading(kernel=topk_rbf) + model.fit(X_train, y_train) + assert model.score(X_test, y_test) >= 0.9 + + model = label_propagation.LabelPropagation(kernel=topk_rbf) + model.fit(X_train, y_train) + assert model.score(X_test, y_test) >= 0.9 diff --git a/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/test_self_training.py b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/test_self_training.py new file mode 100644 index 0000000000000000000000000000000000000000..2efeb32446f8927071d873dd6e586945fb73f6d8 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/sklearn/semi_supervised/tests/test_self_training.py @@ -0,0 +1,345 @@ +from math import ceil + +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn.datasets import load_iris, make_blobs +from sklearn.ensemble import StackingClassifier +from sklearn.exceptions import NotFittedError +from sklearn.metrics import accuracy_score +from sklearn.model_selection import train_test_split +from sklearn.neighbors import KNeighborsClassifier +from sklearn.semi_supervised import SelfTrainingClassifier +from sklearn.svm import SVC +from sklearn.tree import DecisionTreeClassifier + +# Author: Oliver Rausch +# License: BSD 3 clause + +# load the iris dataset and randomly permute it +iris = load_iris() +X_train, X_test, y_train, y_test = train_test_split( + iris.data, iris.target, random_state=0 +) + +n_labeled_samples = 50 + +y_train_missing_labels = y_train.copy() +y_train_missing_labels[n_labeled_samples:] = -1 +mapping = {0: "A", 1: "B", 2: "C", -1: "-1"} +y_train_missing_strings = np.vectorize(mapping.get)(y_train_missing_labels).astype( + object +) +y_train_missing_strings[y_train_missing_labels == -1] = -1 + + +def test_warns_k_best(): + st = SelfTrainingClassifier(KNeighborsClassifier(), criterion="k_best", k_best=1000) + with pytest.warns(UserWarning, match="k_best is larger than"): + st.fit(X_train, y_train_missing_labels) + + assert st.termination_condition_ == "all_labeled" + + +@pytest.mark.parametrize( + "base_estimator", + [KNeighborsClassifier(), SVC(gamma="scale", probability=True, random_state=0)], +) +@pytest.mark.parametrize("selection_crit", ["threshold", "k_best"]) +def test_classification(base_estimator, selection_crit): + # Check classification for various parameter settings. + # Also assert that predictions for strings and numerical labels are equal. 
+ # Also test for multioutput classification + threshold = 0.75 + max_iter = 10 + st = SelfTrainingClassifier( + base_estimator, max_iter=max_iter, threshold=threshold, criterion=selection_crit + ) + st.fit(X_train, y_train_missing_labels) + pred = st.predict(X_test) + proba = st.predict_proba(X_test) + + st_string = SelfTrainingClassifier( + base_estimator, max_iter=max_iter, criterion=selection_crit, threshold=threshold + ) + st_string.fit(X_train, y_train_missing_strings) + pred_string = st_string.predict(X_test) + proba_string = st_string.predict_proba(X_test) + + assert_array_equal(np.vectorize(mapping.get)(pred), pred_string) + assert_array_equal(proba, proba_string) + + assert st.termination_condition_ == st_string.termination_condition_ + # Check consistency between labeled_iter, n_iter and max_iter + labeled = y_train_missing_labels != -1 + # assert that labeled samples have labeled_iter = 0 + assert_array_equal(st.labeled_iter_ == 0, labeled) + # assert that labeled samples do not change label during training + assert_array_equal(y_train_missing_labels[labeled], st.transduction_[labeled]) + + # assert that the max of the iterations is less than the total amount of + # iterations + assert np.max(st.labeled_iter_) <= st.n_iter_ <= max_iter + assert np.max(st_string.labeled_iter_) <= st_string.n_iter_ <= max_iter + + # check shapes + assert st.labeled_iter_.shape == st.transduction_.shape + assert st_string.labeled_iter_.shape == st_string.transduction_.shape + + +def test_k_best(): + st = SelfTrainingClassifier( + KNeighborsClassifier(n_neighbors=1), + criterion="k_best", + k_best=10, + max_iter=None, + ) + y_train_only_one_label = np.copy(y_train) + y_train_only_one_label[1:] = -1 + n_samples = y_train.shape[0] + + n_expected_iter = ceil((n_samples - 1) / 10) + st.fit(X_train, y_train_only_one_label) + assert st.n_iter_ == n_expected_iter + + # Check labeled_iter_ + assert np.sum(st.labeled_iter_ == 0) == 1 + for i in range(1, n_expected_iter): + assert np.sum(st.labeled_iter_ == i) == 10 + assert np.sum(st.labeled_iter_ == n_expected_iter) == (n_samples - 1) % 10 + assert st.termination_condition_ == "all_labeled" + + +def test_sanity_classification(): + base_estimator = SVC(gamma="scale", probability=True) + base_estimator.fit(X_train[n_labeled_samples:], y_train[n_labeled_samples:]) + + st = SelfTrainingClassifier(base_estimator) + st.fit(X_train, y_train_missing_labels) + + pred1, pred2 = base_estimator.predict(X_test), st.predict(X_test) + assert not np.array_equal(pred1, pred2) + score_supervised = accuracy_score(base_estimator.predict(X_test), y_test) + score_self_training = accuracy_score(st.predict(X_test), y_test) + + assert score_self_training > score_supervised + + +def test_none_iter(): + # Check that the all samples were labeled after a 'reasonable' number of + # iterations. + st = SelfTrainingClassifier(KNeighborsClassifier(), threshold=0.55, max_iter=None) + st.fit(X_train, y_train_missing_labels) + + assert st.n_iter_ < 10 + assert st.termination_condition_ == "all_labeled" + + +@pytest.mark.parametrize( + "base_estimator", + [KNeighborsClassifier(), SVC(gamma="scale", probability=True, random_state=0)], +) +@pytest.mark.parametrize("y", [y_train_missing_labels, y_train_missing_strings]) +def test_zero_iterations(base_estimator, y): + # Check classification for zero iterations. + # Fitting a SelfTrainingClassifier with zero iterations should give the + # same results as fitting a supervised classifier. + # This also asserts that string arrays work as expected. 
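+ # With max_iter=0 the self-labeling loop never runs, so no pseudo-labels are added
+ # and the termination condition is reported as "max_iter".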
+ + clf1 = SelfTrainingClassifier(base_estimator, max_iter=0) + + clf1.fit(X_train, y) + + clf2 = base_estimator.fit(X_train[:n_labeled_samples], y[:n_labeled_samples]) + + assert_array_equal(clf1.predict(X_test), clf2.predict(X_test)) + assert clf1.termination_condition_ == "max_iter" + + +def test_prefitted_throws_error(): + # Test that passing a pre-fitted classifier and calling predict throws an + # error + knn = KNeighborsClassifier() + knn.fit(X_train, y_train) + st = SelfTrainingClassifier(knn) + with pytest.raises( + NotFittedError, + match="This SelfTrainingClassifier instance is not fitted yet", + ): + st.predict(X_train) + + +@pytest.mark.parametrize("max_iter", range(1, 5)) +def test_labeled_iter(max_iter): + # Check that the amount of datapoints labeled in iteration 0 is equal to + # the amount of labeled datapoints we passed. + st = SelfTrainingClassifier(KNeighborsClassifier(), max_iter=max_iter) + + st.fit(X_train, y_train_missing_labels) + amount_iter_0 = len(st.labeled_iter_[st.labeled_iter_ == 0]) + assert amount_iter_0 == n_labeled_samples + # Check that the max of the iterations is less than the total amount of + # iterations + assert np.max(st.labeled_iter_) <= st.n_iter_ <= max_iter + + +def test_no_unlabeled(): + # Test that training on a fully labeled dataset produces the same results + # as training the classifier by itself. + knn = KNeighborsClassifier() + knn.fit(X_train, y_train) + st = SelfTrainingClassifier(knn) + with pytest.warns(UserWarning, match="y contains no unlabeled samples"): + st.fit(X_train, y_train) + assert_array_equal(knn.predict(X_test), st.predict(X_test)) + # Assert that all samples were labeled in iteration 0 (since there were no + # unlabeled samples). + assert np.all(st.labeled_iter_ == 0) + assert st.termination_condition_ == "all_labeled" + + +def test_early_stopping(): + svc = SVC(gamma="scale", probability=True) + st = SelfTrainingClassifier(svc) + X_train_easy = [[1], [0], [1], [0.5]] + y_train_easy = [1, 0, -1, -1] + # X = [[0.5]] cannot be predicted on with a high confidence, so training + # stops early + st.fit(X_train_easy, y_train_easy) + assert st.n_iter_ == 1 + assert st.termination_condition_ == "no_change" + + +def test_strings_dtype(): + clf = SelfTrainingClassifier(KNeighborsClassifier()) + X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1) + labels_multiclass = ["one", "two", "three"] + + y_strings = np.take(labels_multiclass, y) + + with pytest.raises(ValueError, match="dtype"): + clf.fit(X, y_strings) + + +@pytest.mark.parametrize("verbose", [True, False]) +def test_verbose(capsys, verbose): + clf = SelfTrainingClassifier(KNeighborsClassifier(), verbose=verbose) + clf.fit(X_train, y_train_missing_labels) + + captured = capsys.readouterr() + + if verbose: + assert "iteration" in captured.out + else: + assert "iteration" not in captured.out + + +def test_verbose_k_best(capsys): + st = SelfTrainingClassifier( + KNeighborsClassifier(n_neighbors=1), + criterion="k_best", + k_best=10, + verbose=True, + max_iter=None, + ) + + y_train_only_one_label = np.copy(y_train) + y_train_only_one_label[1:] = -1 + n_samples = y_train.shape[0] + + n_expected_iter = ceil((n_samples - 1) / 10) + st.fit(X_train, y_train_only_one_label) + + captured = capsys.readouterr() + + msg = "End of iteration {}, added {} new labels." 
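+ # Every full iteration adds exactly k_best=10 labels; the final iteration adds
+ # whatever remainder of unlabeled samples is left, as checked below.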
+ for i in range(1, n_expected_iter): + assert msg.format(i, 10) in captured.out + + assert msg.format(n_expected_iter, (n_samples - 1) % 10) in captured.out + + +def test_k_best_selects_best(): + # Tests that the labels added by st really are the 10 best labels. + svc = SVC(gamma="scale", probability=True, random_state=0) + st = SelfTrainingClassifier(svc, criterion="k_best", max_iter=1, k_best=10) + has_label = y_train_missing_labels != -1 + st.fit(X_train, y_train_missing_labels) + + got_label = ~has_label & (st.transduction_ != -1) + + svc.fit(X_train[has_label], y_train_missing_labels[has_label]) + pred = svc.predict_proba(X_train[~has_label]) + max_proba = np.max(pred, axis=1) + + most_confident_svc = X_train[~has_label][np.argsort(max_proba)[-10:]] + added_by_st = X_train[np.where(got_label)].tolist() + + for row in most_confident_svc.tolist(): + assert row in added_by_st + + +def test_base_estimator_meta_estimator(): + # Check that a meta-estimator relying on an estimator implementing + # `predict_proba` will work even if it does not expose this method before being + # fitted. + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/19119 + + base_estimator = StackingClassifier( + estimators=[ + ("svc_1", SVC(probability=True)), + ("svc_2", SVC(probability=True)), + ], + final_estimator=SVC(probability=True), + cv=2, + ) + + assert hasattr(base_estimator, "predict_proba") + clf = SelfTrainingClassifier(base_estimator=base_estimator) + clf.fit(X_train, y_train_missing_labels) + clf.predict_proba(X_test) + + base_estimator = StackingClassifier( + estimators=[ + ("svc_1", SVC(probability=False)), + ("svc_2", SVC(probability=False)), + ], + final_estimator=SVC(probability=False), + cv=2, + ) + + assert not hasattr(base_estimator, "predict_proba") + clf = SelfTrainingClassifier(base_estimator=base_estimator) + with pytest.raises(AttributeError): + clf.fit(X_train, y_train_missing_labels) + + +def test_self_training_estimator_attribute_error(): + """Check that we raise the proper AttributeErrors when the `base_estimator` + does not implement the `predict_proba` method, which is called from within + `fit`, or `decision_function`, which is decorated with `available_if`. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28108 + """ + # `SVC` with `probability=False` does not implement 'predict_proba' that + # is required internally in `fit` of `SelfTrainingClassifier`. We expect + # an AttributeError to be raised. + base_estimator = SVC(probability=False, gamma="scale") + self_training = SelfTrainingClassifier(base_estimator) + + with pytest.raises(AttributeError, match="has no attribute 'predict_proba'"): + self_training.fit(X_train, y_train_missing_labels) + + # `DecisionTreeClassifier` does not implement 'decision_function' and + # should raise an AttributeError + self_training = SelfTrainingClassifier(base_estimator=DecisionTreeClassifier()) + + outer_msg = "This 'SelfTrainingClassifier' has no attribute 'decision_function'" + inner_msg = "'DecisionTreeClassifier' object has no attribute 'decision_function'" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + self_training.fit(X_train, y_train_missing_labels).decision_function(X_train) + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__)
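
For reference, a minimal sketch of the SelfTrainingClassifier workflow exercised by the tests above (the data split, the SVC base estimator, and all parameter values below are illustrative assumptions, not taken from the diff):

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.svm import SVC

X_train, X_test, y_train, y_test = train_test_split(
    *load_iris(return_X_y=True), random_state=0
)

# Mark every sample after the first 50 as unlabeled (-1), mirroring the test setup.
y_semi = y_train.copy()
y_semi[50:] = -1

# "threshold" criterion: on each iteration, pseudo-label the samples whose maximum
# predicted probability exceeds the threshold.
clf = SelfTrainingClassifier(SVC(probability=True, gamma="scale"), threshold=0.75)
clf.fit(X_train, y_semi)
print(clf.termination_condition_, clf.score(X_test, y_test))

# "k_best" criterion: pseudo-label the 10 most confident samples per iteration
# until everything is labeled (max_iter=None removes the iteration cap).
clf_kb = SelfTrainingClassifier(
    SVC(probability=True, gamma="scale"),
    criterion="k_best",
    k_best=10,
    max_iter=None,
)
clf_kb.fit(X_train, y_semi)
print(clf_kb.n_iter_, clf_kb.labeled_iter_.max(), clf_kb.termination_condition_)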